data dict |
|---|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ18ja0QXC",
"doi": "10.1109/VR.2019.8798070",
"title": "User-Centered Extension of a Locomotion Typology: Movement-Related Sensory Feedback and Spatial Learning",
"normalizedTitle": "User-Centered Extension of a Locomotion Typology: Movement-Related Sensory Feedback and Spatial Learning",
"abstract": "When human operators locomote actively in virtual environments (VE), the movement range often has to be adapted to the limited dimensions of the physical space. This however might lead to a conflict between sensory information originating from user movements and sensory feedback provided through the virtual locomotion. To investigate whether different locomotion strategies that adapt virtual movement to the limited physical space impact cognitive processes, two experiments were conducted. The first experiment used walking in place, the second study scale of locomotion to investigate the impact of locomotion adaptation on the acquisition of spatial knowledge and user experience. We systematically analyzed body-based sensorial conflicts for the different adaptation strategies and reveal that neither walking in place nor scale of locomotion impacts spatial knowledge acquisition or user experience. We can conclude that visual cues indicating locomotion combined with body-based rotational cues seem to be sufficient for the acquisition of spatial knowledge and that locomotion with controllers seems efficient and preferable for users. The results link system-driven typologies with human-centered factors to guide systematic tests of locomotion techniques in virtual environments for future studies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When human operators locomote actively in virtual environments (VE), the movement range often has to be adapted to the limited dimensions of the physical space. This however might lead to a conflict between sensory information originating from user movements and sensory feedback provided through the virtual locomotion. To investigate whether different locomotion strategies that adapt virtual movement to the limited physical space impact cognitive processes, two experiments were conducted. The first experiment used walking in place, the second study scale of locomotion to investigate the impact of locomotion adaptation on the acquisition of spatial knowledge and user experience. We systematically analyzed body-based sensorial conflicts for the different adaptation strategies and reveal that neither walking in place nor scale of locomotion impacts spatial knowledge acquisition or user experience. We can conclude that visual cues indicating locomotion combined with body-based rotational cues seem to be sufficient for the acquisition of spatial knowledge and that locomotion with controllers seems efficient and preferable for users. The results link system-driven typologies with human-centered factors to guide systematic tests of locomotion techniques in virtual environments for future studies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When human operators locomote actively in virtual environments (VE), the movement range often has to be adapted to the limited dimensions of the physical space. This however might lead to a conflict between sensory information originating from user movements and sensory feedback provided through the virtual locomotion. To investigate whether different locomotion strategies that adapt virtual movement to the limited physical space impact cognitive processes, two experiments were conducted. The first experiment used walking in place, the second study scale of locomotion to investigate the impact of locomotion adaptation on the acquisition of spatial knowledge and user experience. We systematically analyzed body-based sensorial conflicts for the different adaptation strategies and reveal that neither walking in place nor scale of locomotion impacts spatial knowledge acquisition or user experience. We can conclude that visual cues indicating locomotion combined with body-based rotational cues seem to be sufficient for the acquisition of spatial knowledge and that locomotion with controllers seems efficient and preferable for users. The results link system-driven typologies with human-centered factors to guide systematic tests of locomotion techniques in virtual environments for future studies.",
"fno": "08798070",
"keywords": [
"Cognition",
"Knowledge Acquisition",
"User Experience",
"Virtual Reality",
"User Centered Extension",
"Locomotion Typology",
"Movement Related Sensory Feedback",
"Spatial Learning",
"Human Operators",
"Virtual Environments",
"Movement Range",
"User Movements",
"Virtual Locomotion",
"Virtual Movement",
"Physical Space Impact Cognitive Processes",
"Locomotion Adaptation",
"User Experience",
"Body Based Sensorial Conflicts",
"Body Based Rotational Cues",
"Human Centered Factors",
"Locomotion Techniques",
"Locomotion Strategies",
"Adaptation Strategies",
"Spatial Knowledge Acquisition",
"Legged Locomotion",
"Floors",
"Visualization",
"Cognitive Processes",
"User Experience",
"Task Analysis",
"Locomotion",
"Body Based Sensorial Cues",
"Spatial Cognition",
"User Experience",
"H 1 2 User Machine Systems Human Information Processing",
"H 5 2 User Interfaces Evaluation Methodology User Centered Design"
],
"authors": [
{
"affiliation": "University of Würzburg Human-Technique-Systems",
"fullName": "Carolin Wienrich",
"givenName": "Carolin",
"surname": "Wienrich",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg Human-Technique-Systems",
"fullName": "Nina Döllinger",
"givenName": "Nina",
"surname": "Döllinger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Berlin",
"fullName": "Simon Kock",
"givenName": "Simon",
"surname": "Kock",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California San Diego",
"fullName": "Klaus Gramann",
"givenName": "Klaus",
"surname": "Gramann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "690-698",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797721",
"articleId": "1cJ0WHR3fPi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798251",
"articleId": "1cJ0YOUUaqc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892348",
"title": "Steering locomotion by vestibular perturbation in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504709",
"title": "The effect of multi-sensory cues on performance and experience during walking in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504709/12OmNyrqzC0",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446130",
"title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404569",
"title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a821",
"title": "Evaluating the Impact of Limited Physical Space on the Navigation Performance of Two Locomotion Methods in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a821/1CJbMX1TyoM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
        "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a696",
"title": "Seamless-walk: Novel Natural Virtual Reality Locomotion Method with a High-Resolution Tactile Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a696/1CJeXaYYtd6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a118",
"title": "Crowd Simulation with Feedback Based on Locomotion State",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a118/1I6RQ8VlGNi",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a346",
"title": "Spring Stepper: A Seated VR Locomotion Controller",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a346/1oZBBswUSzK",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXFgLAfSw",
"doi": "10.1109/VRW52623.2021.00082",
"title": "The Effectiveness of Locomotion Interfaces Depends on Self-Motion Cues, Environmental Cues, and the Individual",
"normalizedTitle": "The Effectiveness of Locomotion Interfaces Depends on Self-Motion Cues, Environmental Cues, and the Individual",
"abstract": "The proliferation of locomotion interfaces for virtual reality necessitates a framework for predicting and evaluating navigational success. Spatial updating-the process of mentally updating one's self-location during locomotion-is a core component of navigation, is easy to measure, and is sensitive to common elements of locomotion interfaces. This paper highlights three factors that influence spatial updating: body-based self-motion cues, environmental cues, and characteristics of the individual. The concordance framework, which characterizes locomotion interfaces based on agreement between body movement and movement through the environment, serves as a useful starting point for understanding the effectiveness of locomotion interfaces for enabling accurate navigation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The proliferation of locomotion interfaces for virtual reality necessitates a framework for predicting and evaluating navigational success. Spatial updating-the process of mentally updating one's self-location during locomotion-is a core component of navigation, is easy to measure, and is sensitive to common elements of locomotion interfaces. This paper highlights three factors that influence spatial updating: body-based self-motion cues, environmental cues, and characteristics of the individual. The concordance framework, which characterizes locomotion interfaces based on agreement between body movement and movement through the environment, serves as a useful starting point for understanding the effectiveness of locomotion interfaces for enabling accurate navigation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The proliferation of locomotion interfaces for virtual reality necessitates a framework for predicting and evaluating navigational success. Spatial updating-the process of mentally updating one's self-location during locomotion-is a core component of navigation, is easy to measure, and is sensitive to common elements of locomotion interfaces. This paper highlights three factors that influence spatial updating: body-based self-motion cues, environmental cues, and characteristics of the individual. The concordance framework, which characterizes locomotion interfaces based on agreement between body movement and movement through the environment, serves as a useful starting point for understanding the effectiveness of locomotion interfaces for enabling accurate navigation.",
"fno": "405700a391",
"keywords": [
"Interactive Devices",
"User Interfaces",
"Virtual Reality",
"Locomotion Interfaces",
"Environmental Cues",
"Spatial Updating",
"Influence Spatial Updating",
"Body Based Self Motion Cues",
"Training",
"Legged Locomotion",
"Three Dimensional Displays",
"Navigation",
"Conferences",
"Taxonomy",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Iowa State University",
"fullName": "Jonathan W. Kelly",
"givenName": "Jonathan W.",
"surname": "Kelly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Stephen B. Gilbert",
"givenName": "Stephen B.",
"surname": "Gilbert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "391-392",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a389",
"articleId": "1tnWx88Rxuw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a393",
"articleId": "1tnX2vv1TS8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892348",
"title": "Steering locomotion by vestibular perturbation in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446165",
"title": "A Threefold Approach for Precise and Efficient Locomotion in Virtual Environments with Varying Accessibility",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446165/13bd1AIBM28",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446130",
"title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/06109251",
"title": "The Design and Evaluation of a Large-Scale Real-Walking Locomotion Interface",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/06109251/13rRUygT7mV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/06/08580399",
"title": "Virtual Locomotion: A Survey",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/08580399/17D45VUZMU0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714054",
"title": "Remote research on locomotion interfaces for virtual reality: Replication of a lab-based study on teleporting interfaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714054/1B0XZAXWaIg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a821",
"title": "Evaluating the Impact of Limited Physical Space on the Navigation Performance of Two Locomotion Methods in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a821/1CJbMX1TyoM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
        "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08809840",
"title": "NaviBoard and NaviChair: Limited Translation Combined with Full Rotation for Efficient Virtual Locomotion",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08809840/1cHE3iFCYpy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09629264",
"title": "Leaning-Based Interfaces Improve Ground-Based VR Locomotion in Reach-the-Target, Follow-the-Path, and Racing Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09629264/1yXvJdO9qaQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXRY815xS",
"doi": "10.1109/VRW52623.2021.00084",
"title": "Is Walking Necessary for Effective Locomotion and Interaction in VR?",
"normalizedTitle": "Is Walking Necessary for Effective Locomotion and Interaction in VR?",
"abstract": "This paper reports on a work-in-progress study to investigate if/how leaning-based interfaces affect simultaneous locomotion and interaction. We compare physical walking and Controller with a seated (i.e., HeadJoystick) and standing (i.e., Naviboard) leaning-based interface. We disambiguated performance in locomotion versus interaction using a novel experimental paradigm, where participants should point toward moving targets using their virtual light-saber while actively following a moving platform.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper reports on a work-in-progress study to investigate if/how leaning-based interfaces affect simultaneous locomotion and interaction. We compare physical walking and Controller with a seated (i.e., HeadJoystick) and standing (i.e., Naviboard) leaning-based interface. We disambiguated performance in locomotion versus interaction using a novel experimental paradigm, where participants should point toward moving targets using their virtual light-saber while actively following a moving platform.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper reports on a work-in-progress study to investigate if/how leaning-based interfaces affect simultaneous locomotion and interaction. We compare physical walking and Controller with a seated (i.e., HeadJoystick) and standing (i.e., Naviboard) leaning-based interface. We disambiguated performance in locomotion versus interaction using a novel experimental paradigm, where participants should point toward moving targets using their virtual light-saber while actively following a moving platform.",
"fno": "405700a395",
"keywords": [
"Human Computer Interaction",
"Virtual Reality",
"Physical Walking",
"Head Joystick",
"Experimental Paradigm",
"VR",
"Work In Progress Study",
"Leaning Based Interfaces",
"Simultaneous Locomotion",
"Naviboard",
"Virtual Light Saber",
"Legged Locomotion",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"User Interfaces",
"Task Analysis",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Simon Fraser University,Canada",
"fullName": "Abraham M. Hashemian",
"givenName": "Abraham M.",
"surname": "Hashemian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University,Canada",
"fullName": "Ernst Kruijff",
"givenName": "Ernst",
"surname": "Kruijff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University,Canada",
"fullName": "Ashu Adhikari",
"givenName": "Ashu",
"surname": "Adhikari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University,Canada",
"fullName": "Markus von der Heyde",
"givenName": "Markus",
"surname": "von der Heyde",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University,Canada",
"fullName": "Ivan Aguilar",
"givenName": "Ivan",
"surname": "Aguilar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University,Canada",
"fullName": "Bernhard E. Riecke",
"givenName": "Bernhard E.",
"surname": "Riecke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "395-396",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a393",
"articleId": "1tnX2vv1TS8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a397",
"articleId": "1tnWHkeIdRS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2015/6886/0/07131718",
"title": "Design and evaluation of a visual acclimation aid for a semi-natural locomotion device",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131718/12OmNBNM8RA",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892348",
"title": "Steering locomotion by vestibular perturbation in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460030",
"title": "Eye tracking for locomotion prediction in redirected walking",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460030/12OmNz4SOsF",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/06109251",
"title": "The Design and Evaluation of a Large-Scale Real-Walking Locomotion Interface",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/06109251/13rRUygT7mV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08809840",
"title": "NaviBoard and NaviChair: Limited Translation Combined with Full Rotation for Efficient Virtual Locomotion",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08809840/1cHE3iFCYpy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090608",
"title": "Towards an Affordance of Embodied Locomotion Interfaces in VR: How to Know How to Move?",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090608/1jIxnjPP9Ti",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a769",
"title": "A Seamless Natural Locomotion Concept for VR Adventure Game \"The Amusement\"",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a769/1tnXtsjoHaU",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09629264",
"title": "Leaning-Based Interfaces Improve Ground-Based VR Locomotion in Reach-the-Target, Follow-the-Path, and Racing Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09629264/1yXvJdO9qaQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Vw15tG",
"doi": "10.1109/CVPR.2018.00981",
"title": "SketchyGAN: Towards Diverse and Realistic Sketch to Image Synthesis",
"normalizedTitle": "SketchyGAN: Towards Diverse and Realistic Sketch to Image Synthesis",
"abstract": "Synthesizing realistic images from human drawn sketches is a challenging problem in computer graphics and vision. Existing approaches either need exact edge maps, or rely on retrieval of existing photographs. In this work, we propose a novel Generative Adversarial Network (GAN) approach that synthesizes plausible images from 50 categories including motorcycles, horses and couches. We demonstrate a data augmentation technique for sketches which is fully automatic, and we show that the augmented data is helpful to our task. We introduce a new network building block suitable for both the generator and discriminator which improves the information flow by injecting the input image at multiple scales. Compared to state-of-the-art image translation methods, our approach generates more realistic images and achieves significantly higher Inception Scores.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Synthesizing realistic images from human drawn sketches is a challenging problem in computer graphics and vision. Existing approaches either need exact edge maps, or rely on retrieval of existing photographs. In this work, we propose a novel Generative Adversarial Network (GAN) approach that synthesizes plausible images from 50 categories including motorcycles, horses and couches. We demonstrate a data augmentation technique for sketches which is fully automatic, and we show that the augmented data is helpful to our task. We introduce a new network building block suitable for both the generator and discriminator which improves the information flow by injecting the input image at multiple scales. Compared to state-of-the-art image translation methods, our approach generates more realistic images and achieves significantly higher Inception Scores.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Synthesizing realistic images from human drawn sketches is a challenging problem in computer graphics and vision. Existing approaches either need exact edge maps, or rely on retrieval of existing photographs. In this work, we propose a novel Generative Adversarial Network (GAN) approach that synthesizes plausible images from 50 categories including motorcycles, horses and couches. We demonstrate a data augmentation technique for sketches which is fully automatic, and we show that the augmented data is helpful to our task. We introduce a new network building block suitable for both the generator and discriminator which improves the information flow by injecting the input image at multiple scales. Compared to state-of-the-art image translation methods, our approach generates more realistic images and achieves significantly higher Inception Scores.",
"fno": "642000j416",
"keywords": [
"Edge Detection",
"Feature Extraction",
"Image Reconstruction",
"Image Representation",
"Neural Nets",
"Realistic Images",
"Computer Graphics",
"Horses",
"Couches",
"Feature Representations",
"Motorcycles",
"Computer Vision",
"Generative Adversarial Network",
"Sketch To Image Synthesis",
"Image Translation",
"Data Augmentation",
"Human Drawn Sketches",
"Realistic Images",
"Sketchy GAN",
"Image Edge Detection",
"Image Generation",
"Gallium Nitride",
"Training",
"Databases",
"Task Analysis",
"Generative Adversarial Networks"
],
"authors": [
{
"affiliation": null,
"fullName": "Wengling Chen",
"givenName": "Wengling",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "James Hays",
"givenName": "James",
"surname": "Hays",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9416-9425",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000j407",
"articleId": "17D45WIXbNY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000j426",
"articleId": "17D45WHONly",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032f908",
"title": "StackGAN: Text to Photo-Realistic Image Synthesis with Stacked Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f908/12OmNA0MZ6U",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a083",
"title": "High-Quality Facial Photo-Sketch Synthesis Using Multi-Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a083/12OmNBv2CeE",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/08/08411144",
"title": "StackGAN++: Realistic Image Synthesis with Stacked Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/journal/tp/2019/08/08411144/13rRUynZ5pp",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g713",
"title": "Towards Open-Set Identity Preserving Face Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g713/17D45VUZMYT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545383",
"title": "Semantic Image Synthesis via Conditional Cycle-Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545383/17D45VtKius",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545787",
"title": "Traffic Sign Image Synthesis with Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545787/17D45WWzW3L",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2019/0089/0/08756632",
"title": "Using Photorealistic Face Synthesis and Domain Adaptation to Improve Facial Expression Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2019/08756632/1bzYoRTaYLK",
"parentPublication": {
"id": "proceedings/fg/2019/0089/0",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aike/2019/1488/0/148800a289",
"title": "Realistic Data Synthesis Using Enhanced Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/aike/2019/148800a289/1ckrzXV9yKI",
"parentPublication": {
"id": "proceedings/aike/2019/1488/0",
"title": "2019 IEEE Second International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102904",
"title": "Text to Image Synthesis With Bidirectional Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102904/1kwr76JKhoc",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i362",
"title": "BachGAN: High-Resolution Image Synthesis From Salient Object Layout",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i362/1m3nyI7NnGg",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XwUAGS",
"doi": "10.1109/CVPR.2018.00837",
"title": "Matching Adversarial Networks",
"normalizedTitle": "Matching Adversarial Networks",
"abstract": "Generative Adversarial Nets (GANs) and Conditonal GANs (CGANs) show that using a trained network as loss function (discriminator) enables to synthesize highly structured outputs (e.g. natural images). However, applying a discriminator network as a universal loss function for common supervised tasks (e.g. semantic segmentation, line detection, depth estimation) is considerably less successful. We argue that the main difficulty of applying CGANs to supervised tasks is that the generator training consists of optimizing a loss function that does not depend directly on the ground truth labels. To overcome this, we propose to replace the discriminator with a matching network taking into account both the ground truth outputs as well as the generated examples. As a consequence, the generator loss function also depends on the targets of the training examples, thus facilitating learning. We demonstrate on three computer vision tasks that this approach can significantly outperform CGANs achieving comparable or superior results to task-specific solutions and results in stable training. Importantly, this is a general approach that does not require the use of task-specific loss functions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Generative Adversarial Nets (GANs) and Conditonal GANs (CGANs) show that using a trained network as loss function (discriminator) enables to synthesize highly structured outputs (e.g. natural images). However, applying a discriminator network as a universal loss function for common supervised tasks (e.g. semantic segmentation, line detection, depth estimation) is considerably less successful. We argue that the main difficulty of applying CGANs to supervised tasks is that the generator training consists of optimizing a loss function that does not depend directly on the ground truth labels. To overcome this, we propose to replace the discriminator with a matching network taking into account both the ground truth outputs as well as the generated examples. As a consequence, the generator loss function also depends on the targets of the training examples, thus facilitating learning. We demonstrate on three computer vision tasks that this approach can significantly outperform CGANs achieving comparable or superior results to task-specific solutions and results in stable training. Importantly, this is a general approach that does not require the use of task-specific loss functions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Generative Adversarial Nets (GANs) and Conditonal GANs (CGANs) show that using a trained network as loss function (discriminator) enables to synthesize highly structured outputs (e.g. natural images). However, applying a discriminator network as a universal loss function for common supervised tasks (e.g. semantic segmentation, line detection, depth estimation) is considerably less successful. We argue that the main difficulty of applying CGANs to supervised tasks is that the generator training consists of optimizing a loss function that does not depend directly on the ground truth labels. To overcome this, we propose to replace the discriminator with a matching network taking into account both the ground truth outputs as well as the generated examples. As a consequence, the generator loss function also depends on the targets of the training examples, thus facilitating learning. We demonstrate on three computer vision tasks that this approach can significantly outperform CGANs achieving comparable or superior results to task-specific solutions and results in stable training. Importantly, this is a general approach that does not require the use of task-specific loss functions.",
"fno": "642000i024",
"keywords": [
"Computer Vision",
"Image Matching",
"Image Segmentation",
"Learning Artificial Intelligence",
"Discriminator Network",
"Universal Loss Function",
"Semantic Segmentation",
"CGA Ns",
"Ground Truth Labels",
"Generator Loss Function",
"Computer Vision Tasks",
"Network Training",
"Supervised Tasks",
"Adversarial Networks Matching",
"Generative Adversarial Nets",
"Conditonal GA Ns",
"Generators",
"Training",
"Gallium Nitride",
"Task Analysis",
"Perturbation Methods",
"Generative Adversarial Networks",
"Image Segmentation"
],
"authors": [
{
"affiliation": null,
"fullName": "Gellert Mattyus",
"givenName": "Gellert",
"surname": "Mattyus",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Raquel Urtasun",
"givenName": "Raquel",
"surname": "Urtasun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "8024-8032",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000i014",
"articleId": "17D45WLdYQJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000i033",
"articleId": "17D45WK5Aoi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032c813",
"title": "Least Squares Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c813/12OmNB7cjhc",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2017/1235/0/08457952",
"title": "Generative Adversarial Networks for Classification",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2017/08457952/13xI8AQ5AJ6",
"parentPublication": {
"id": "proceedings/aipr/2017/1235/0",
"title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a115",
"title": "Deep Feature Similarity for Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a115/17D45Wuc39X",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546246",
"title": "Deep Generative Adversarial Networks for the Sparse Signal Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546246/17D45X7VTge",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000a413",
"title": "Monocular Depth Prediction Using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000a413/17D45XH89pp",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2018/9159/0/08594958",
"title": "TreeGAN: Syntax-Aware Sequence Generation with Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2018/08594958/17D45Xtvp9B",
"parentPublication": {
"id": "proceedings/icdm/2018/9159/0",
"title": "2018 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102779",
"title": "A Multi-Player Minimax Game for Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102779/1kwr6BsKnRu",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdpsw/2020/7445/0/09150388",
"title": "Parallel/distributed implementation of cellular training for generative adversarial neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2020/09150388/1lPGHc3Qm7C",
"parentPublication": {
"id": "proceedings/ipdpsw/2020/7445/0",
"title": "2020 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h796",
"title": "MSG-GAN: Multi-Scale Gradients for Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h796/1m3oneHfgTS",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/05/09290435",
"title": "Optimizing Latent Distributions for Non-Adversarial Generative Networks",
"doi": null,
"abstractUrl": "/journal/tp/2022/05/09290435/1prKHvxF2lW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmL89RkOSk",
"doi": "10.1109/ICCV48922.2021.00085",
"title": "OpenGAN: Open-Set Recognition via Open Data Generation",
"normalizedTitle": "OpenGAN: Open-Set Recognition via Open Data Generation",
"abstract": "Real-world machine learning systems need to analyze novel testing data that differs from the training data. In K-way classification, this is crisply formulated as open-set recognition, core to which is the ability to discriminate open-set data outside the K closed-set classes. Two conceptually elegant ideas for open-set discrimination are: 1) discriminatively learning an open-vs-closed binary discriminator by exploiting some outlier data as the open-set, and 2) unsupervised learning the closed-set data distribution with a GAN and using its discriminator as the open-set likelihood function. However, the former generalizes poorly to diverse open test data due to overfitting to the training outliers, which unlikely exhaustively span the open-world. The latter does not work well, presumably due to the instable training of GANs. Motivated by the above, we propose OpenGAN, which addresses the limitation of each approach by combining them with several technical insights. First, we show that a carefully selected GAN-discriminator on some real outlier data already achieves the state-of-the-art. Second, we augment the available set of real open training examples with adversarially synthesized \"fake\" data. Third and most importantly, we build the discriminator over the features computed by the closed-world K-way networks. Extensive experiments show that Open-GAN significantly outperforms prior open-set methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Real-world machine learning systems need to analyze novel testing data that differs from the training data. In K-way classification, this is crisply formulated as open-set recognition, core to which is the ability to discriminate open-set data outside the K closed-set classes. Two conceptually elegant ideas for open-set discrimination are: 1) discriminatively learning an open-vs-closed binary discriminator by exploiting some outlier data as the open-set, and 2) unsupervised learning the closed-set data distribution with a GAN and using its discriminator as the open-set likelihood function. However, the former generalizes poorly to diverse open test data due to overfitting to the training outliers, which unlikely exhaustively span the open-world. The latter does not work well, presumably due to the instable training of GANs. Motivated by the above, we propose OpenGAN, which addresses the limitation of each approach by combining them with several technical insights. First, we show that a carefully selected GAN-discriminator on some real outlier data already achieves the state-of-the-art. Second, we augment the available set of real open training examples with adversarially synthesized \"fake\" data. Third and most importantly, we build the discriminator over the features computed by the closed-world K-way networks. Extensive experiments show that Open-GAN significantly outperforms prior open-set methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Real-world machine learning systems need to analyze novel testing data that differs from the training data. In K-way classification, this is crisply formulated as open-set recognition, core to which is the ability to discriminate open-set data outside the K closed-set classes. Two conceptually elegant ideas for open-set discrimination are: 1) discriminatively learning an open-vs-closed binary discriminator by exploiting some outlier data as the open-set, and 2) unsupervised learning the closed-set data distribution with a GAN and using its discriminator as the open-set likelihood function. However, the former generalizes poorly to diverse open test data due to overfitting to the training outliers, which unlikely exhaustively span the open-world. The latter does not work well, presumably due to the instable training of GANs. Motivated by the above, we propose OpenGAN, which addresses the limitation of each approach by combining them with several technical insights. First, we show that a carefully selected GAN-discriminator on some real outlier data already achieves the state-of-the-art. Second, we augment the available set of real open training examples with adversarially synthesized \"fake\" data. Third and most importantly, we build the discriminator over the features computed by the closed-world K-way networks. Extensive experiments show that Open-GAN significantly outperforms prior open-set methods.",
"fno": "281200a793",
"keywords": [
"Training",
"Image Segmentation",
"Image Recognition",
"Semantics",
"Training Data",
"Machine Learning",
"Generative Adversarial Networks",
"Recognition And Classification",
"Adversarial Learning"
],
"authors": [
{
"affiliation": "Carnegie Mellon University",
"fullName": "Shu Kong",
"givenName": "Shu",
"surname": "Kong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Deva Ramanan",
"givenName": "Deva",
"surname": "Ramanan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "793-802",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200a782",
"articleId": "1BmEMHlM0Du",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200a803",
"articleId": "1BmIaHFtfJm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/csde/2021/9552/0/09718461",
"title": "A Conditional Generative Adversarial Network for Non-rigid Point Set Registration",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2021/09718461/1BogMWGWQUg",
"parentPublication": {
"id": "proceedings/csde/2021/9552/0",
"title": "2021 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09799769",
"title": "<italic>OpenGAN</italic>: Open-Set Recognition Via Open Data Generation",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09799769/1Eho7vs5sxq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1175",
"title": "OSSGAN: Open-Set Semi-Supervised Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1175/1H1icQxfSNO",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600g284",
"title": "MORGAN: Meta-Learning-based Few-Shot Open-Set Recognition via Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600g284/1L6LAD5ZNWE",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a178",
"title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2020/7081/0/708100a479",
"title": "An Feature Image Generation Based on Adversarial Generation Network",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2020/708100a479/1iEREwoRJF6",
"parentPublication": {
"id": "proceedings/icmtma/2020/7081/0",
"title": "2020 12th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093525",
"title": "FX-GAN: Self-Supervised GAN Learning via Feature Exchange",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093525/1jPbxvOsk6s",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102917",
"title": "Matchinggan: Matching-Based Few-Shot Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102917/1kwr3cBl864",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2020/4380/0/438000a864",
"title": "Generation of malicious webpage samples based on GAN",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2020/438000a864/1r54cGDIsw0",
"parentPublication": {
"id": "proceedings/trustcom/2020/4380/0",
"title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700d941",
"title": "InfoMax-GAN: Improved Adversarial Image Generation via Information Maximization and Contrastive Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700d941/1uqGe5x8U5W",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1dx8nEYXhp6",
"title": "2019 Twelfth International Conference on Contemporary Computing (IC3)",
"acronym": "ic3",
"groupId": "1803947",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1dx8qXnyVvq",
"doi": "10.1109/IC3.2019.8844885",
"title": "Human Sketch Recognition using Generative Adversarial Networks and One-Shot Learning",
"normalizedTitle": "Human Sketch Recognition using Generative Adversarial Networks and One-Shot Learning",
"abstract": "We introduce a model for face recognition from sketches. The system follows a multi-layered approach and is built by combining sketch to image generation, and face recognition methods. First, sketch to photo generation is achieved by employing cGAN based pix2pix model. Then face recognition is done by One Shot Learning using FaceNet. The sketch recognition model developed through this research is able to yield good results on multiple datasets with the generated images performing with an accuracy close to that with the original images. The average difference from the recognition accuracy as compared to with original images was approximately three percent on the datasets used. Real time recognition using sketches as inputs is also effectively explored through the combination of these two layers of approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a model for face recognition from sketches. The system follows a multi-layered approach and is built by combining sketch to image generation, and face recognition methods. First, sketch to photo generation is achieved by employing cGAN based pix2pix model. Then face recognition is done by One Shot Learning using FaceNet. The sketch recognition model developed through this research is able to yield good results on multiple datasets with the generated images performing with an accuracy close to that with the original images. The average difference from the recognition accuracy as compared to with original images was approximately three percent on the datasets used. Real time recognition using sketches as inputs is also effectively explored through the combination of these two layers of approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a model for face recognition from sketches. The system follows a multi-layered approach and is built by combining sketch to image generation, and face recognition methods. First, sketch to photo generation is achieved by employing cGAN based pix2pix model. Then face recognition is done by One Shot Learning using FaceNet. The sketch recognition model developed through this research is able to yield good results on multiple datasets with the generated images performing with an accuracy close to that with the original images. The average difference from the recognition accuracy as compared to with original images was approximately three percent on the datasets used. Real time recognition using sketches as inputs is also effectively explored through the combination of these two layers of approach.",
"fno": "08844885",
"keywords": [
"Face Recognition",
"Learning Artificial Intelligence",
"Face Recognition",
"Multilayered Approach",
"Image Generation",
"Photo Generation",
"C GAN Based Pix 2 Pix Model",
"Human Sketch Recognition",
"Generative Adversarial Networks",
"One Shot Learning",
"Face Recognition",
"Training",
"Image Synthesis",
"Image Segmentation",
"Generative Adversarial Networks",
"Task Analysis",
"Generators",
"Generative Adversarial Networks",
"One Shot Learning",
"Sketch Recognition",
"Sketch To Image Generation",
"Conditional Generative Adversarial Network"
],
"authors": [
{
"affiliation": "ADGITM, GGSIPU, New Delhi, India",
"fullName": "Deepanshu Wadhwa",
"givenName": "Deepanshu",
"surname": "Wadhwa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ADGITM, GGSIPU, New Delhi, India",
"fullName": "Utkarsh Maharana",
"givenName": "Utkarsh",
"surname": "Maharana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ADGITM, GGSIPU, New Delhi, India",
"fullName": "Devina Shah",
"givenName": "Devina",
"surname": "Shah",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ADGITM, GGSIPU, New Delhi, India",
"fullName": "Vaibhav Yadav",
"givenName": "Vaibhav",
"surname": "Yadav",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ADGITM, GGSIPU, New Delhi, India",
"fullName": "Prashant Pandey",
"givenName": "Prashant",
"surname": "Pandey",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ic3",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3591-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08844927",
"articleId": "1dx8nPpR6Q8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08844925",
"articleId": "1dx8pW7071S",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2018/2335/0/233501a083",
"title": "High-Quality Facial Photo-Sketch Synthesis Using Multi-Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a083/12OmNBv2CeE",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2009/11/ttp2009111955",
"title": "Face Photo-Sketch Synthesis and Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2017/2652/0/2652a432",
"title": "FaceNet Based Face Sketch Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2017/2652a432/17D45WrVg0v",
"parentPublication": {
"id": "proceedings/csci/2017/2652/0",
"title": "2017 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956126",
"title": "Deep face generation from a rough sketch using multi-level generative adversarial networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956126/1IHoDYoZqnK",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2019/0089/0/08756563",
"title": "Improving Face Sketch Recognition via Adversarial Sketch-Photo Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2019/08756563/1bzYpqF2pFK",
"parentPublication": {
"id": "proceedings/fg/2019/0089/0",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300f823",
"title": "SketchGAN: Joint Sketch Completion and Recognition With Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300f823/1gyrBC0WYEM",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412022",
"title": "Attributes Aware Face Generation with Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412022/1tmimHQIteE",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428348",
"title": "High-Quality Face Sketch Synthesis via Geometric Normalization and Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428348/1uim6wBM1Mc",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d349",
"title": "Training Generative Adversarial Networks in One Stage",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d349/1yeHTxuRRZe",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1kwqNHC4Fy0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kwrlxsf48o",
"doi": "10.1109/ICME46284.2020.9102794",
"title": "Eigan: Enhanced Inpainting Generative Adversarial Network",
"normalizedTitle": "Eigan: Enhanced Inpainting Generative Adversarial Network",
"abstract": "Generating coherent texture in a repaired region, especially at the boundary, is one of the challenges in image inpainting. To maintain the coherence in the edge transitional region, we propose an efficient framework, enhanced inpainting generative adversarial network (EIGAN). EIGAN is composed of a multi-resolution intersection encoder, a dual decoder, and an adversarial patch discriminator. The encoder and the decoder are used to process multi-resolution semantic information in a parallel and cross-connected fashion. Specially, we utilize a soft-contextual attention module embedded into the decoder to capture contextual information so that the attention module can help the decoder to generate coherent texture and content. Moreover, to improve the details, we introduce an advanced patch discriminator with relativistic adversarial loss named adversarial patch discriminator. Experimental results show that our improvements can enhance the coherence and quality of the completed image, and enable EIGAN to outperform the state-of-the-art image inpainting methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Generating coherent texture in a repaired region, especially at the boundary, is one of the challenges in image inpainting. To maintain the coherence in the edge transitional region, we propose an efficient framework, enhanced inpainting generative adversarial network (EIGAN). EIGAN is composed of a multi-resolution intersection encoder, a dual decoder, and an adversarial patch discriminator. The encoder and the decoder are used to process multi-resolution semantic information in a parallel and cross-connected fashion. Specially, we utilize a soft-contextual attention module embedded into the decoder to capture contextual information so that the attention module can help the decoder to generate coherent texture and content. Moreover, to improve the details, we introduce an advanced patch discriminator with relativistic adversarial loss named adversarial patch discriminator. Experimental results show that our improvements can enhance the coherence and quality of the completed image, and enable EIGAN to outperform the state-of-the-art image inpainting methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Generating coherent texture in a repaired region, especially at the boundary, is one of the challenges in image inpainting. To maintain the coherence in the edge transitional region, we propose an efficient framework, enhanced inpainting generative adversarial network (EIGAN). EIGAN is composed of a multi-resolution intersection encoder, a dual decoder, and an adversarial patch discriminator. The encoder and the decoder are used to process multi-resolution semantic information in a parallel and cross-connected fashion. Specially, we utilize a soft-contextual attention module embedded into the decoder to capture contextual information so that the attention module can help the decoder to generate coherent texture and content. Moreover, to improve the details, we introduce an advanced patch discriminator with relativistic adversarial loss named adversarial patch discriminator. Experimental results show that our improvements can enhance the coherence and quality of the completed image, and enable EIGAN to outperform the state-of-the-art image inpainting methods.",
"fno": "09102794",
"keywords": [
"Image Enhancement",
"Image Resolution",
"Image Texture",
"Neural Nets",
"Inpainting Generative Adversarial Network",
"Image Inpainting Methods",
"Relativistic Adversarial Loss",
"Soft Contextual Attention Module",
"Multiresolution Semantic Information",
"Adversarial Patch Discriminator",
"Dual Decoder",
"Multiresolution Intersection Encoder",
"EIGAN",
"Edge Transitional Region",
"Convolution",
"Decoding",
"Semantics",
"Feature Extraction",
"Generative Adversarial Networks",
"Coherence",
"Generators",
"Image Inpainting",
"Multi Resolution Intersection Encoder",
"Dual Decoder",
"Soft Contextual Attention Module",
"Relativistic Patch Discriminator"
],
"authors": [
{
"affiliation": "Fudan University,Shanghai Key Laboratory of Intelligent Information Processing, School of Computer Science,China",
"fullName": "Feiyu Chen",
"givenName": "Feiyu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fudan University,Shanghai Key Laboratory of Intelligent Information Processing, School of Computer Science,China",
"fullName": "Wei Deng",
"givenName": "Wei",
"surname": "Deng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fudan University,Shanghai Key Laboratory of Intelligent Information Processing, School of Computer Science,China",
"fullName": "Chuanfa Zhang",
"givenName": "Chuanfa",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fudan University,Shanghai Key Laboratory of Intelligent Information Processing, School of Computer Science,China",
"fullName": "Kangzheng Gu",
"givenName": "Kangzheng",
"surname": "Gu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fudan University,Shanghai Key Laboratory of Intelligent Information Processing, School of Computer Science,China",
"fullName": "Wenqiang Zhang",
"givenName": "Wenqiang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-1331-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09102822",
"articleId": "1kwqQPn0xmo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09102832",
"articleId": "1kwrg2UZmbC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ictai/2018/7449/0/744900a242",
"title": "Sequence Generative Adversarial Network for Long Text Summarization",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2018/744900a242/17D45WIXbQb",
"parentPublication": {
"id": "proceedings/ictai/2018/7449/0",
"title": "2018 IEEE 30th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545701",
"title": "Pyramid Embedded Generative Adversarial Network for Automated Font Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545701/17D45Wc1IJp",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a588",
"title": "Image Inpainting: A Contextual Consistent and Deep Generative Adversarial Training Approach",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a588/17D45WgziOP",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a977",
"title": "Image Multi-Inpainting via Progressive Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a977/1G56VVqeIco",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2022/9744/0/974400a923",
"title": "Image Inpainting with Context Flow Network",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2022/974400a923/1MrFXR3HdXa",
"parentPublication": {
"id": "proceedings/ictai/2022/9744/0",
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2020/7083/0/708300a259",
"title": "Research on Image Inpainting Based on Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2020/708300a259/1oCn35eA06I",
"parentPublication": {
"id": "proceedings/iccnea/2020/7083/0",
"title": "2020 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispds/2020/9668/0/966800a040",
"title": "Dynamic Graph Generative Adversarial Networks for Skeleton-Based Human Motion Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/ispds/2020/966800a040/1oRiVW6zXby",
"parentPublication": {
"id": "proceedings/ispds/2020/9668/0",
"title": "2020 International Conference on Information Science, Parallel and Distributed Systems (ISPDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a137",
"title": "Research on Character Image Inpainting based on Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a137/1p1gu7CZsqI",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2020/9146/0/914600a276",
"title": "Semantic image inpainting based on Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2020/914600a276/1rCg9gkLLlC",
"parentPublication": {
"id": "proceedings/icaice/2020/9146/0",
"title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2021/3954/0/395400a007",
"title": "Sketch-based Anime Hairstyle Editing with Generative Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2021/395400a007/1wnPu0lkCvS",
"parentPublication": {
"id": "proceedings/nicoint/2021/3954/0",
"title": "2021 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1rCg5NWvMis",
"title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"acronym": "icaice",
"groupId": "1840544",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rCg9gkLLlC",
"doi": "10.1109/ICAICE51518.2020.00059",
"title": "Semantic image inpainting based on Generative Adversarial Networks",
"normalizedTitle": "Semantic image inpainting based on Generative Adversarial Networks",
"abstract": "Semantic image inpainting is different from traditional methods. Although traditional image inpainting methods can achieve good results, they ignore the context, which makes the inpainting images look less natural. Semantic image inpainting does better in this aspect. In this paper, we propose a new method about semantic image inpainting by improving the model of Generative Adversarial Networks. The generator of our network model refers to Boundary Equilibrium Generative Adversarial Networks and adds spectral normalization to improving unstable training. Then, our network chooses discriminator of Self-Attention Generative Adversarial Networks which is more concise. After training, the network model is limited to generate image encoding by context loss and prior loss to make it as similar as possible to the missing image encoding. Then, the generated images encoding corresponding to the without missing parts of the real images are obtained. After fusing the generated missing parts with the unmissed parts of the real image, it is the inpainting image. By the comparisons of vision and quantization, it shows that our method can well complete the task of large missing regions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Semantic image inpainting is different from traditional methods. Although traditional image inpainting methods can achieve good results, they ignore the context, which makes the inpainting images look less natural. Semantic image inpainting does better in this aspect. In this paper, we propose a new method about semantic image inpainting by improving the model of Generative Adversarial Networks. The generator of our network model refers to Boundary Equilibrium Generative Adversarial Networks and adds spectral normalization to improving unstable training. Then, our network chooses discriminator of Self-Attention Generative Adversarial Networks which is more concise. After training, the network model is limited to generate image encoding by context loss and prior loss to make it as similar as possible to the missing image encoding. Then, the generated images encoding corresponding to the without missing parts of the real images are obtained. After fusing the generated missing parts with the unmissed parts of the real image, it is the inpainting image. By the comparisons of vision and quantization, it shows that our method can well complete the task of large missing regions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Semantic image inpainting is different from traditional methods. Although traditional image inpainting methods can achieve good results, they ignore the context, which makes the inpainting images look less natural. Semantic image inpainting does better in this aspect. In this paper, we propose a new method about semantic image inpainting by improving the model of Generative Adversarial Networks. The generator of our network model refers to Boundary Equilibrium Generative Adversarial Networks and adds spectral normalization to improving unstable training. Then, our network chooses discriminator of Self-Attention Generative Adversarial Networks which is more concise. After training, the network model is limited to generate image encoding by context loss and prior loss to make it as similar as possible to the missing image encoding. Then, the generated images encoding corresponding to the without missing parts of the real images are obtained. After fusing the generated missing parts with the unmissed parts of the real image, it is the inpainting image. By the comparisons of vision and quantization, it shows that our method can well complete the task of large missing regions.",
"fno": "914600a276",
"keywords": [
"Image Coding",
"Image Representation",
"Image Restoration",
"Image Texture",
"Semantic Image Inpainting",
"Traditional Image Inpainting Methods",
"Inpainting Image",
"Network Model",
"Boundary Equilibrium Generative Adversarial Networks",
"Missing Image Encoding",
"Training",
"Image Coding",
"Semantics",
"Generative Adversarial Networks",
"Generators",
"Task Analysis",
"Faces",
"Semantic Image Inpainting",
"Generative Adversarial Networks",
"Context Loss",
"Prior Loss"
],
"authors": [
{
"affiliation": "Guangxi Normal University,College of Electronic Engineering,Guilin,China",
"fullName": "Chugang Wu",
"givenName": "Chugang",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Guangxi Normal University,College of Electronic Engineering,Guilin,China",
"fullName": "Yanhua Xian",
"givenName": "Yanhua",
"surname": "Xian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Guangxi Normal University,College of Electronic Engineering,Guilin,China",
"fullName": "Junqi Bai",
"givenName": "Junqi",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Guangxi Normal University,College of Electronic Engineering,Guilin,China",
"fullName": "Yuancheng Jing",
"givenName": "Yuancheng",
"surname": "Jing",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icaice",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-10-01T00:00:00",
"pubType": "proceedings",
"pages": "276-280",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9146-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "914600a268",
"articleId": "1rCg7g1SMJW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "914600a281",
"articleId": "1rCg6mba8YU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457g882",
"title": "Semantic Image Inpainting with Deep Generative Models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g882/12OmNzXnNvU",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a588",
"title": "Image Inpainting: A Contextual Consistent and Deep Generative Adversarial Training Approach",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a588/17D45WgziOP",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4094",
"title": "WaveFill: A Wavelet-based Generation Network for Image Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4094/1BmKV8lWWT6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a977",
"title": "Image Multi-Inpainting via Progressive Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a977/1G56VVqeIco",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200b168",
"title": "Facial Image Inpainting Using Multi-level Generative Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200b168/1cdONULOGju",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a203",
"title": "Joint Inpainting of RGB and Depth Images by Generative Adversarial Network with a Late Fusion Approach",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a203/1gysn7LMTfO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0520",
"title": "Boundless: Generative Adversarial Networks for Image Extension",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0520/1hVlEtLruve",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102794",
"title": "Eigan: Enhanced Inpainting Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102794/1kwrlxsf48o",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2020/7083/0/708300a259",
"title": "Research on Image Inpainting Based on Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2020/708300a259/1oCn35eA06I",
"parentPublication": {
"id": "proceedings/iccnea/2020/7083/0",
"title": "2020 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a137",
"title": "Research on Character Image Inpainting based on Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a137/1p1gu7CZsqI",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tmhi3ly74c",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tmhFtJAjIY",
"doi": "10.1109/ICPR48806.2021.9412655",
"title": "Adaptive Image Compression Using GAN based Semantic-perceptual Residual Compensation",
"normalizedTitle": "Adaptive Image Compression Using GAN based Semantic-perceptual Residual Compensation",
"abstract": "Image compression is a basic task in image processing. The existing methods always have problems such as the loss of image details and the reconstructed image does not conform to human vision. This paper presents an adaptive image compression algorithm that relies on GAN based semantic-perceptual residual compensation, which is available to offer visually pleasing reconstruction at a low bitrate. Our method derive from a U-shaped encoder-decoder structure accompanied by a well-designed dense residual connection with a strip pooling module to improve the original auto-encoder. Besides, we utilize the idea of adversarial learning by introducing a discriminator, thus constructed a complete GAN. To improve the coding efficiency, we creatively designed an adaptive semantic-perception residual compensation block based on the Grad-CAM algorithm. Through the strategy of adversarial learning, the reconstructed image is more towards the distribution of the real image, and further semantic perception can achieve higher quality compression of the region of interest from the human attention. Besides, we combine multiple existing quantitative methods, including the latest FLIF lossless compression algorithm, BPG vector compression algorithm and soft-quantization to perform deeper compression on the image. Experimental results, including PSNR, MS-SSIM demonstrate that the proposed approach outperforms the current state-of-the-art image compression methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image compression is a basic task in image processing. The existing methods always have problems such as the loss of image details and the reconstructed image does not conform to human vision. This paper presents an adaptive image compression algorithm that relies on GAN based semantic-perceptual residual compensation, which is available to offer visually pleasing reconstruction at a low bitrate. Our method derive from a U-shaped encoder-decoder structure accompanied by a well-designed dense residual connection with a strip pooling module to improve the original auto-encoder. Besides, we utilize the idea of adversarial learning by introducing a discriminator, thus constructed a complete GAN. To improve the coding efficiency, we creatively designed an adaptive semantic-perception residual compensation block based on the Grad-CAM algorithm. Through the strategy of adversarial learning, the reconstructed image is more towards the distribution of the real image, and further semantic perception can achieve higher quality compression of the region of interest from the human attention. Besides, we combine multiple existing quantitative methods, including the latest FLIF lossless compression algorithm, BPG vector compression algorithm and soft-quantization to perform deeper compression on the image. Experimental results, including PSNR, MS-SSIM demonstrate that the proposed approach outperforms the current state-of-the-art image compression methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image compression is a basic task in image processing. The existing methods always have problems such as the loss of image details and the reconstructed image does not conform to human vision. This paper presents an adaptive image compression algorithm that relies on GAN based semantic-perceptual residual compensation, which is available to offer visually pleasing reconstruction at a low bitrate. Our method derive from a U-shaped encoder-decoder structure accompanied by a well-designed dense residual connection with a strip pooling module to improve the original auto-encoder. Besides, we utilize the idea of adversarial learning by introducing a discriminator, thus constructed a complete GAN. To improve the coding efficiency, we creatively designed an adaptive semantic-perception residual compensation block based on the Grad-CAM algorithm. Through the strategy of adversarial learning, the reconstructed image is more towards the distribution of the real image, and further semantic perception can achieve higher quality compression of the region of interest from the human attention. Besides, we combine multiple existing quantitative methods, including the latest FLIF lossless compression algorithm, BPG vector compression algorithm and soft-quantization to perform deeper compression on the image. Experimental results, including PSNR, MS-SSIM demonstrate that the proposed approach outperforms the current state-of-the-art image compression methods.",
"fno": "09412655",
"keywords": [
"Data Compression",
"Image Coding",
"Image Reconstruction",
"Learning Artificial Intelligence",
"Neural Nets",
"Adversarial Learning",
"Reconstructed Image",
"Higher Quality Compression",
"Multiple Existing Quantitative Methods",
"BPG Vector Compression Algorithm",
"Deeper Compression",
"Image Processing",
"Human Vision",
"Adaptive Image Compression Algorithm",
"U Shaped Encoder Decoder Structure",
"Dense Residual Connection",
"Strip Pooling Module",
"Original Auto Encoder",
"Complete GAN",
"Grad CAM Algorithm",
"Semantic Perceptual Residual Compensation",
"FLIF Lossless Compression",
"Coding Efficiency",
"Adaptive Semantic Perception",
"Residual Compensation Block",
"Soft Quantization",
"Strips",
"Image Coding",
"Adaptive Systems",
"Quantization Signal",
"Semantics",
"Generative Adversarial Networks",
"Pattern Recognition"
],
"authors": [
{
"affiliation": "Graduate School of IPS, Waseda University,Kitakyushu,Japan",
"fullName": "Ruojing Wang",
"givenName": "Ruojing",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of IPS, Waseda University,Kitakyushu,Japan",
"fullName": "Zitang Sun",
"givenName": "Zitang",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of IPS, Waseda University,Kitakyushu,Japan",
"fullName": "Sei-ichiro Kamata",
"givenName": "Sei-ichiro",
"surname": "Kamata",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "9030-9037",
"year": "2021",
"issn": "1051-4651",
"isbn": "978-1-7281-8808-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09413296",
"articleId": "1tmj56YrQ1a",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09412963",
"articleId": "1tmidmo2qaY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2022/0915/0/091500b422",
"title": "PPCD-GAN: Progressive Pruning and Class-Aware Distillation for Large-Scale Conditional GANs Compression",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500b422/1B12Ip5Xv4Q",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2021/1790/0/179000a180",
"title": "Image Classification and Generation Based on GAN Model",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2021/179000a180/1BQiufCqV7W",
"parentPublication": {
"id": "proceedings/mlbdbi/2021/1790/0",
"title": "2021 3rd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2021/2186/0/218600a802",
"title": "Image Translation based on Attention Residual GAN",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2021/218600a802/1Et4rw01Jvi",
"parentPublication": {
"id": "proceedings/icaice/2021/2186/0",
"title": "2021 2nd International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b769",
"title": "Perceptual in-Loop Filter for Image and Video Compression",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b769/1G57eD5Rqwg",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093387",
"title": "A GAN-based Tunable Image Compression System",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093387/1jPbpI8xSqA",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f283",
"title": "GAN Compression: Efficient Architectures for Interactive Conditional GANs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f283/1m3og8x4vra",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412185",
"title": "Fidelity-Controllable Extreme Image Compression with Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412185/1tmiI4mszPq",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900b895",
"title": "Perceptual Image Compression using Relativistic Average Least Squares GANs",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900b895/1yXsCRpFuqk",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900m2151",
"title": "Content-Aware GAN Compression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900m2151/1yeJfjdpcg8",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09609572",
"title": "GAN Compression: Efficient Architectures for Interactive Conditional GANs",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09609572/1yoxFsTI4vu",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1uiluGq0Oo8",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1uim6wBM1Mc",
"doi": "10.1109/ICME51207.2021.9428348",
"title": "High-Quality Face Sketch Synthesis via Geometric Normalization and Regularization",
"normalizedTitle": "High-Quality Face Sketch Synthesis via Geometric Normalization and Regularization",
"abstract": "In this work, we propose a novel Generative Adversarial Network for generating a structure-consistent and texture-realistic sketch, conditioned on a face photo. To this end, we propose to boost the capacity of the generator via geometric normalization and regularization. Specially, we first propose an enhanced spatially-adaptive normalization module to modulate the activation, based on the semantic layout and encoding features of the input face. Besides, we use two regularization loss functions to minimize the structural divergence between a generated sketch and the corresponding face photo. Experimental results show that our proposed techniques significantly improve the quality of synthesized sketches, in terms of both structure and texture. Besides, our full model can generate high-quality sketches and significantly outperform previous state-of-the-arts, over a wide range of challenging data. We have made our code and results publicly available: http://aiart.live/genre/.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we propose a novel Generative Adversarial Network for generating a structure-consistent and texture-realistic sketch, conditioned on a face photo. To this end, we propose to boost the capacity of the generator via geometric normalization and regularization. Specially, we first propose an enhanced spatially-adaptive normalization module to modulate the activation, based on the semantic layout and encoding features of the input face. Besides, we use two regularization loss functions to minimize the structural divergence between a generated sketch and the corresponding face photo. Experimental results show that our proposed techniques significantly improve the quality of synthesized sketches, in terms of both structure and texture. Besides, our full model can generate high-quality sketches and significantly outperform previous state-of-the-arts, over a wide range of challenging data. We have made our code and results publicly available: http://aiart.live/genre/.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we propose a novel Generative Adversarial Network for generating a structure-consistent and texture-realistic sketch, conditioned on a face photo. To this end, we propose to boost the capacity of the generator via geometric normalization and regularization. Specially, we first propose an enhanced spatially-adaptive normalization module to modulate the activation, based on the semantic layout and encoding features of the input face. Besides, we use two regularization loss functions to minimize the structural divergence between a generated sketch and the corresponding face photo. Experimental results show that our proposed techniques significantly improve the quality of synthesized sketches, in terms of both structure and texture. Besides, our full model can generate high-quality sketches and significantly outperform previous state-of-the-arts, over a wide range of challenging data. We have made our code and results publicly available: http://aiart.live/genre/.",
"fno": "09428348",
"keywords": [
"Face Recognition",
"Feature Extraction",
"Image Texture",
"High Quality Sketches",
"Synthesized Sketches",
"Corresponding Face Photo",
"Generated Sketch",
"Structural Divergence",
"Regularization Loss",
"Input Face",
"Encoding Features",
"Semantic Layout",
"Enhanced Spatially Adaptive Normalization Module",
"Texture Realistic Sketch",
"Structure Consistent",
"Novel Generative Adversarial Network",
"Geometric Normalization",
"High Quality Face Sketch Synthesis",
"Frequency Selective Surfaces",
"Conferences",
"Semantics",
"Layout",
"Generative Adversarial Networks",
"Generators",
"Encoding",
"Face Sketch Synthesis",
"Generative Adversarial Network",
"Deep Learning",
"Spatially Adaptive Denormalization",
"Portrait Drawings Generation"
],
"authors": [
{
"affiliation": "Hangzhou Dianzi University,School of Computer Science and Technology,Hangzhou,China,310018",
"fullName": "Xiang Li",
"givenName": "Xiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hangzhou Dianzi University,School of Computer Science and Technology,Hangzhou,China,310018",
"fullName": "Fei Gao",
"givenName": "Fei",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hangzhou Dianzi University,School of Computer Science and Technology,Hangzhou,China,310018",
"fullName": "Fei Huang",
"givenName": "Fei",
"surname": "Huang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3864-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09428467",
"articleId": "1uilO1EGYNy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09428419",
"articleId": "1uim56c0BOg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2011/4541/0/4541a412",
"title": "Local Regression Model for Automatic Face Sketch Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a412/12OmNAq3hxY",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2003/1950/1/195010687",
"title": "Face Sketch Synthesis and Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2003/195010687/12OmNCcbE0u",
"parentPublication": {
"id": "proceedings/iccv/2003/1950/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a082",
"title": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a082/12OmNx0RIMA",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460802",
"title": "Smoothness-constrained face photo-sketch synthesis using sparse representation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460802/12OmNxE2mI9",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2009/11/ttp2009111955",
"title": "Face Photo-Sketch Synthesis and Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2017/2652/0/2652a432",
"title": "FaceNet Based Face Sketch Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2017/2652a432/17D45WrVg0v",
"parentPublication": {
"id": "proceedings/csci/2017/2652/0",
"title": "2017 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a944",
"title": "Adversarial Open Domain Adaptation for Sketch-to-Photo Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a944/1B13AO3WYa4",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956126",
"title": "Deep face generation from a rough sketch using multi-level generative adversarial networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956126/1IHoDYoZqnK",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2019/3591/0/08844885",
"title": "Human Sketch Recognition using Generative Adversarial Networks and One-Shot Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2019/08844885/1dx8qXnyVvq",
"parentPublication": {
"id": "proceedings/ic3/2019/3591/0",
"title": "2019 Twelfth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1wnPqJgL30c",
"title": "2021 Nicograph International (NicoInt)",
"acronym": "nicoint",
"groupId": "1814784",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1wnPu0lkCvS",
"doi": "10.1109/NICOINT52941.2021.00009",
"title": "Sketch-based Anime Hairstyle Editing with Generative Inpainting",
"normalizedTitle": "Sketch-based Anime Hairstyle Editing with Generative Inpainting",
"abstract": "In this work, we propose an interactive sketch-based design interface for anime hairstyle editing. The proposed system adopts the gated convolutional layer in the Generative Adversarial Networks model to achieve the generative editing of anime images. The generator network is based on an encoder-decoder framework with gated convolution to ensure the network can learn the random mask feature in image editing. The discriminator network uses the spectral-normalized architecture to preserve the stability of learning. In this study, we first collect and preprocess the dataset for data training. After that, we construct the user interface based on the pretrained model. Finally, we evaluate the system usage and user experience of the proposed editing interface.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we propose an interactive sketch-based design interface for anime hairstyle editing. The proposed system adopts the gated convolutional layer in the Generative Adversarial Networks model to achieve the generative editing of anime images. The generator network is based on an encoder-decoder framework with gated convolution to ensure the network can learn the random mask feature in image editing. The discriminator network uses the spectral-normalized architecture to preserve the stability of learning. In this study, we first collect and preprocess the dataset for data training. After that, we construct the user interface based on the pretrained model. Finally, we evaluate the system usage and user experience of the proposed editing interface.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we propose an interactive sketch-based design interface for anime hairstyle editing. The proposed system adopts the gated convolutional layer in the Generative Adversarial Networks model to achieve the generative editing of anime images. The generator network is based on an encoder-decoder framework with gated convolution to ensure the network can learn the random mask feature in image editing. The discriminator network uses the spectral-normalized architecture to preserve the stability of learning. In this study, we first collect and preprocess the dataset for data training. After that, we construct the user interface based on the pretrained model. Finally, we evaluate the system usage and user experience of the proposed editing interface.",
"fno": "395400a007",
"keywords": [
"Computer Animation",
"Graphical User Interfaces",
"Interactive Systems",
"Learning Artificial Intelligence",
"Neural Nets",
"Solid Modelling",
"Sketch Based Anime Hairstyle Editing",
"Generative Inpainting",
"Interactive Sketch Based Design Interface",
"Gated Convolutional Layer",
"Generative Adversarial Networks Model",
"Generative Editing",
"Generator Network",
"Encoder Decoder Framework",
"Gated Convolution",
"Random Mask Feature",
"Discriminator Network",
"User Interface",
"User Experience",
"Anime Image Editing Interface",
"Training",
"Convolution",
"Semantics",
"Prototypes",
"User Interfaces",
"Logic Gates",
"Generative Adversarial Networks",
"Anime Hairstyle",
"Editing Interface",
"Generative Model",
"Gated Convolution"
],
"authors": [
{
"affiliation": "Japan Advanced Institute of Science and Technology,Ishikawa,Japan",
"fullName": "Shuyang Luo",
"givenName": "Shuyang",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Japan Advanced Institute of Science and Technology,Ishikawa,Japan",
"fullName": "Haoran Xie",
"givenName": "Haoran",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Japan Advanced Institute of Science and Technology,Ishikawa,Japan",
"fullName": "Kazunori Miyata",
"givenName": "Kazunori",
"surname": "Miyata",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "nicoint",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "7-14",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3954-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "395400a001",
"articleId": "1wnPrwHNFwQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "395400a015",
"articleId": "1wnPstnEOxG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ichci/2021/0764/0/076400a027",
"title": "Generating Anime Characters and Experimental Analysis Based on DCGAN Model",
"doi": null,
"abstractUrl": "/proceedings-article/ichci/2021/076400a027/1Bb0WjVO1fW",
"parentPublication": {
"id": "proceedings/ichci/2021/0764/0",
"title": "2021 2nd International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnlp/2022/9544/0/954400a081",
"title": "Face Inpainting Algorithm Combining Face Sketch and Gate Convolution",
"doi": null,
"abstractUrl": "/proceedings-article/icnlp/2022/954400a081/1GNtpLkDiV2",
"parentPublication": {
"id": "proceedings/icnlp/2022/9544/0",
"title": "2022 4th International Conference on Natural Language Processing (ICNLP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1184",
"title": "Attribute Group Editing for Reliable Few-shot Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1184/1H1jtqIOq3K",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8459",
"title": "Brain-Supervised Image Editing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8459/1H1k1IE8ivS",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpbd&is/2019/0466/0/08735455",
"title": "Facial Attribute Editing using Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/hpbd&is/2019/08735455/1aPuRzozTfW",
"parentPublication": {
"id": "proceedings/hpbd&is/2019/0466/0",
"title": "2019 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102794",
"title": "Eigan: Enhanced Inpainting Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102794/1kwrlxsf48o",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2020/7083/0/708300a259",
"title": "Research on Image Inpainting Based on Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2020/708300a259/1oCn35eA06I",
"parentPublication": {
"id": "proceedings/iccnea/2020/7083/0",
"title": "2020 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnlp/2021/1411/0/141100a213",
"title": "Face Inpainting Combining Structured Forest Edge Information and Gated Convolution",
"doi": null,
"abstractUrl": "/proceedings-article/icnlp/2021/141100a213/1wYlkVBWSu4",
"parentPublication": {
"id": "proceedings/icnlp/2021/1411/0",
"title": "2021 3rd International Conference on Natural Language Processing (ICNLP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900e430",
"title": "Generative Hierarchical Features from Synthesizing Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900e430/1yeL04fqrXG",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2021/2354/0/235400a129",
"title": "The Gated Recurrent Conditional Generative Adversarial Network (GRC-GAN): application to denoising of low-dose CT images",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2021/235400a129/1zurrLUnFlu",
"parentPublication": {
"id": "proceedings/sibgrapi/2021/2354/0",
"title": "2021 34th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1zw5CYExBa8",
"title": "2021 Ninth International Symposium on Computing and Networking Workshops (CANDARW)",
"acronym": "candarw",
"groupId": "1829704",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1zw5HlWwmL6",
"doi": "10.1109/CANDARW53999.2021.00089",
"title": "Loss Function of GAN to Make a Clear Judgment",
"normalizedTitle": "Loss Function of GAN to Make a Clear Judgment",
"abstract": "In general, in GAN training, the Discriminator learns a loss function whose value becomes smaller when the expected label matches the correct label. However, if we do not make a clear decision about the authenticity of the image, the GAN will continue to learn about ambiguous images, and the resulting images will be less clear. In this experiment, we propose a loss function that penalizes the Discriminator when it makes an ambiguous decision by adding a regularization term to the existing loss function Binary Cross Entropy(BCE), and verify its superiority through experiments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In general, in GAN training, the Discriminator learns a loss function whose value becomes smaller when the expected label matches the correct label. However, if we do not make a clear decision about the authenticity of the image, the GAN will continue to learn about ambiguous images, and the resulting images will be less clear. In this experiment, we propose a loss function that penalizes the Discriminator when it makes an ambiguous decision by adding a regularization term to the existing loss function Binary Cross Entropy(BCE), and verify its superiority through experiments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In general, in GAN training, the Discriminator learns a loss function whose value becomes smaller when the expected label matches the correct label. However, if we do not make a clear decision about the authenticity of the image, the GAN will continue to learn about ambiguous images, and the resulting images will be less clear. In this experiment, we propose a loss function that penalizes the Discriminator when it makes an ambiguous decision by adding a regularization term to the existing loss function Binary Cross Entropy(BCE), and verify its superiority through experiments.",
"fno": "283500a478",
"keywords": [
"Entropy",
"Image Processing",
"Neural Nets",
"Clear Judgment",
"GAN Training",
"Ambiguous Images",
"Ambiguous Decision",
"Binary Cross Entropy",
"Training",
"Conferences",
"Generative Adversarial Networks",
"Probability Distribution",
"GAN",
"Machine Learning",
"Deep Learning",
"Regularization Term"
],
"authors": [
{
"affiliation": "Kanazawa University,Kanazawa,Japan",
"fullName": "Kuniyasu Imade",
"givenName": "Kuniyasu",
"surname": "Imade",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kanazawa University,Kanazawa,Japan",
"fullName": "Toi Tsuneda",
"givenName": "Toi",
"surname": "Tsuneda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kanazawa University,Kanazawa,Japan",
"fullName": "Satoshi Yamane",
"givenName": "Satoshi",
"surname": "Yamane",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kanazawa University,Kanazawa,Japan",
"fullName": "Kousuke Shintani",
"givenName": "Kousuke",
"surname": "Shintani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kanazawa University,Kanazawa,Japan",
"fullName": "Taro Kiriyama",
"givenName": "Taro",
"surname": "Kiriyama",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "candarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "478-480",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2835-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "283500a473",
"articleId": "1zw5LZj6M2A",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "283500a481",
"articleId": "1zw5OtnTdvO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2018/6420/0/642000a821",
"title": "FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a821/17D45Xh13pk",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2021/2603/0/260300a280",
"title": "Multi-Feature Fusion based Image Steganography using GAN",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2021/260300a280/1AZO9amrOrC",
"parentPublication": {
"id": "proceedings/issrew/2021/2603/0",
"title": "2021 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1265",
"title": "DO-GAN: A Double Oracle Framework for Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1265/1H1kORDwthu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/10012320",
"title": "RDP-GAN: A Rényi-Differential Privacy Based Generative Adversarial Network",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/10012320/1JNmQL4l7lm",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a178",
"title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093525",
"title": "FX-GAN: Self-Supervised GAN Learning via Feature Exchange",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093525/1jPbxvOsk6s",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2020/4380/0/438000a864",
"title": "Generation of malicious webpage samples based on GAN",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2020/438000a864/1r54cGDIsw0",
"parentPublication": {
"id": "proceedings/trustcom/2020/4380/0",
"title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c545",
"title": "Accelerated WGAN update strategy with loss change rate balancing",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c545/1uqGDIGZOy4",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdataservice/2021/3483/0/348300a069",
"title": "Deepfake Detection using GAN Discriminators",
"doi": null,
"abstractUrl": "/proceedings-article/bigdataservice/2021/348300a069/1xNNnHKs5xe",
"parentPublication": {
"id": "proceedings/bigdataservice/2021/3483/0",
"title": "2021 IEEE Seventh International Conference on Big Data Computing Service and Applications (BigDataService)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2021/0898/0/089800a420",
"title": "Use mean field theory to train a 200-layer vanilla GAN",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2021/089800a420/1zw64SvtrSU",
"parentPublication": {
"id": "proceedings/ictai/2021/0898/0",
"title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCbU3aP",
"title": "2009 WRI Global Congress on Intelligent Systems",
"acronym": "gcis",
"groupId": "1002842",
"volume": "2",
"displayVolume": "2",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNA0vo1q",
"doi": "10.1109/GCIS.2009.338",
"title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection",
"normalizedTitle": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection",
"abstract": "Eye Gaze Tracking (EGT) is a kind of techniques that can estimate gaze direction of a person via various methods such as optics, electronics, mechanics etc. Pupil corneal reflection is one of those methods that using the vector from Purkinje image center location to pupil center location to estimate the gaze direction. A review on key techniques of EGT based on pupil corneal reflection is given in three aspects of eye feature detection, mapping model from gaze parameters to gaze direction and face pose estimation. The characteristics of two kind mapping models are analyzed and the detection method of gaze direction based on stereovision is discussed. According to a large number of recent references, we propose some major problems and their possible solutions in EGT techniques as well as its research orientation in future.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Eye Gaze Tracking (EGT) is a kind of techniques that can estimate gaze direction of a person via various methods such as optics, electronics, mechanics etc. Pupil corneal reflection is one of those methods that using the vector from Purkinje image center location to pupil center location to estimate the gaze direction. A review on key techniques of EGT based on pupil corneal reflection is given in three aspects of eye feature detection, mapping model from gaze parameters to gaze direction and face pose estimation. The characteristics of two kind mapping models are analyzed and the detection method of gaze direction based on stereovision is discussed. According to a large number of recent references, we propose some major problems and their possible solutions in EGT techniques as well as its research orientation in future.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Eye Gaze Tracking (EGT) is a kind of techniques that can estimate gaze direction of a person via various methods such as optics, electronics, mechanics etc. Pupil corneal reflection is one of those methods that using the vector from Purkinje image center location to pupil center location to estimate the gaze direction. A review on key techniques of EGT based on pupil corneal reflection is given in three aspects of eye feature detection, mapping model from gaze parameters to gaze direction and face pose estimation. The characteristics of two kind mapping models are analyzed and the detection method of gaze direction based on stereovision is discussed. According to a large number of recent references, we propose some major problems and their possible solutions in EGT techniques as well as its research orientation in future.",
"fno": "3571b133",
"keywords": [
"Eye Gaze Tracking",
"IR Source",
"Pupil",
"Purkinje Image",
"Statistic Model",
"Geometric Model",
"Face Pose Estimation",
"Stereovision"
],
"authors": [
{
"affiliation": null,
"fullName": "Chi Jian-nan",
"givenName": "Chi",
"surname": "Jian-nan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhang Peng-yi",
"givenName": "Zhang",
"surname": "Peng-yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zheng Si-yi",
"givenName": "Zheng",
"surname": "Si-yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhang Chuang",
"givenName": "Zhang",
"surname": "Chuang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huang Ying",
"givenName": "Huang",
"surname": "Ying",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "gcis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-05-01T00:00:00",
"pubType": "proceedings",
"pages": "133-138",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3571-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3571b127",
"articleId": "12OmNyPQ4LO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3571b139",
"articleId": "12OmNBh8gT3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icisa/2014/4443/0/06847398",
"title": "Eye Detection for near Infrared Based Gaze Tracking System",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2014/06847398/12OmNrJAdSv",
"parentPublication": {
"id": "proceedings/icisa/2014/4443/0",
"title": "2014 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a048",
"title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/3/3736c617",
"title": "Eye Gaze Calculation Based on Nonlinear Polynomial and Generalized Regression Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736c617/12OmNwD1pNV",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/3",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a458",
"title": "Eye Detection for Gaze Tracker with Near Infrared Illuminator",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a458/12OmNx3q6Yv",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2003/2032/0/20320406",
"title": "A Computer Vision Framework for Eye Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2003/20320406/12OmNyr8YuL",
"parentPublication": {
"id": "proceedings/sibgrapi/2003/2032/0",
"title": "16th Brazilian Symposium on Computer Graphics and Image Processing (SIBGRAPI 2003)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/1/3736a342",
"title": "The Role of Trait Anxiety in the Interaction between Eye Gaze and Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736a342/12OmNzAohXt",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/4",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/1/4151a300",
"title": "A Novel Simple 2D Model of Eye Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151a300/12OmNzQR1nK",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/1",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020101",
"title": "Non-Contact Eye Gaze Tracking System by Mapping of Corneal Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020101/12OmNzgwmIY",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a449",
"title": "A Novel Remote Eye Gaze Tracking System Using Line Illumination Sources",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a449/19wB6HJrVmM",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a375",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a375/1JrQRCijhMk",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwt5sgJ",
"title": "CVPR 2011",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC8MsAV",
"doi": "10.1109/CVPR.2011.5995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"normalizedTitle": "Probabilistic gaze estimation without active personal calibration",
"abstract": "Existing eye gaze tracking systems typically require an explicit personal calibration process in order to estimate certain person-specific eye parameters. For natural human computer interaction, such a personal calibration is often cumbersome and unnatural. In this paper, we propose a new probabilistic eye gaze tracking system without explicit personal calibration. Unlike the traditional eye gaze tracking methods, which estimate the eye parameter deterministically, our approach estimates the probability distributions of the eye parameter and the eye gaze, by combining image saliency with the 3D eye model. By using an incremental learning framework, the subject doesn't need personal calibration before using the system. His/her eye parameter and gaze estimation can be improved gradually when he/she is naturally viewing a sequence of images on the screen. The experimental result shows that the proposed system can achieve less than three degrees accuracy for different people without calibration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing eye gaze tracking systems typically require an explicit personal calibration process in order to estimate certain person-specific eye parameters. For natural human computer interaction, such a personal calibration is often cumbersome and unnatural. In this paper, we propose a new probabilistic eye gaze tracking system without explicit personal calibration. Unlike the traditional eye gaze tracking methods, which estimate the eye parameter deterministically, our approach estimates the probability distributions of the eye parameter and the eye gaze, by combining image saliency with the 3D eye model. By using an incremental learning framework, the subject doesn't need personal calibration before using the system. His/her eye parameter and gaze estimation can be improved gradually when he/she is naturally viewing a sequence of images on the screen. The experimental result shows that the proposed system can achieve less than three degrees accuracy for different people without calibration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing eye gaze tracking systems typically require an explicit personal calibration process in order to estimate certain person-specific eye parameters. For natural human computer interaction, such a personal calibration is often cumbersome and unnatural. In this paper, we propose a new probabilistic eye gaze tracking system without explicit personal calibration. Unlike the traditional eye gaze tracking methods, which estimate the eye parameter deterministically, our approach estimates the probability distributions of the eye parameter and the eye gaze, by combining image saliency with the 3D eye model. By using an incremental learning framework, the subject doesn't need personal calibration before using the system. His/her eye parameter and gaze estimation can be improved gradually when he/she is naturally viewing a sequence of images on the screen. The experimental result shows that the proposed system can achieve less than three degrees accuracy for different people without calibration.",
"fno": "05995675",
"keywords": [
"Image Sequences",
"Probabilistic Gaze Estimation",
"Personal Calibration",
"Eye Gaze Tracking Systems",
"Human Computer Interaction",
"Probability Distributions",
"3 D Eye Model",
"Incremental Learning",
"Parameter Estimation"
],
"authors": [
{
"affiliation": "Dept. of Electr., Comput. & Syst. Eng., Rensselaer Polytech. Inst., Troy, NY, USA",
"fullName": "Jixu Chen",
"givenName": null,
"surname": "Jixu Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr., Comput. & Syst. Eng., Rensselaer Polytech. Inst., Troy, NY, USA",
"fullName": "Qiang Ji",
"givenName": null,
"surname": "Qiang Ji",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-06-01T00:00:00",
"pubType": "proceedings",
"pages": "609-616",
"year": "2011",
"issn": null,
"isbn": "978-1-4577-0394-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05995674",
"articleId": "12OmNyTwRhK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05995676",
"articleId": "12OmNA14Ah5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itme/2016/3906/0/3906a380",
"title": "A New Calibration-Free Gaze Tracking Algorithm Based on DE-SLFA",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2016/3906a380/12OmNBbsieg",
"parentPublication": {
"id": "proceedings/itme/2016/3906/0",
"title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2011/4419/0/4419a186",
"title": "The Importance of Eye Gaze and Head Pose to Estimating Levels of Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2011/4419a186/12OmNqyDjtb",
"parentPublication": {
"id": "proceedings/vs-games/2011/4419/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a048",
"title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890322",
"title": "Realtime gaze estimation with online calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890322/12OmNvjyxUU",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b003",
"title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771469",
"title": "Constraint-based gaze estimation without active calibration",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771469/12OmNwdbVch",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/1/4151a300",
"title": "A Novel Simple 2D Model of Eye Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151a300/12OmNzQR1nK",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/1",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/03/ttp2010030478",
"title": "In the Eye of the Beholder: A Survey of Models for Eyes and Gaze",
"doi": null,
"abstractUrl": "/journal/tp/2010/03/ttp2010030478/13rRUxOdD9o",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrNh0vw",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvCi45y",
"doi": "10.1109/ICPR.2014.208",
"title": "Gaze Estimation Based on 3D Face Structure and Pupil Centers",
"normalizedTitle": "Gaze Estimation Based on 3D Face Structure and Pupil Centers",
"abstract": "It is a challenging problem to realize a robust and low cost gaze estimation system. Most existing feature-based gaze estimation methods strongly rely on cornea reflections, which are unstable to glasses, head movements and natural light. In this paper, we propose a novel gaze estimation method without use of cornea reflections based on a stereo camera system. Firstly, 3D Active Shape Models (ASM) is reconstructed using stereo vision to represent 3D face structure. Then, without use of cornea reflections, a 3D Pupil-Eye-Contours based feature is proposed to represent human gaze information. What's more, precise estimation of head poses based on 3D face structure is employed to rectify the 3D pupil centers and eye contours for improving the ability of tolerance to head movements. Experiments on fifteen subjects show that the system is accurate and allows natural head movements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It is a challenging problem to realize a robust and low cost gaze estimation system. Most existing feature-based gaze estimation methods strongly rely on cornea reflections, which are unstable to glasses, head movements and natural light. In this paper, we propose a novel gaze estimation method without use of cornea reflections based on a stereo camera system. Firstly, 3D Active Shape Models (ASM) is reconstructed using stereo vision to represent 3D face structure. Then, without use of cornea reflections, a 3D Pupil-Eye-Contours based feature is proposed to represent human gaze information. What's more, precise estimation of head poses based on 3D face structure is employed to rectify the 3D pupil centers and eye contours for improving the ability of tolerance to head movements. Experiments on fifteen subjects show that the system is accurate and allows natural head movements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It is a challenging problem to realize a robust and low cost gaze estimation system. Most existing feature-based gaze estimation methods strongly rely on cornea reflections, which are unstable to glasses, head movements and natural light. In this paper, we propose a novel gaze estimation method without use of cornea reflections based on a stereo camera system. Firstly, 3D Active Shape Models (ASM) is reconstructed using stereo vision to represent 3D face structure. Then, without use of cornea reflections, a 3D Pupil-Eye-Contours based feature is proposed to represent human gaze information. What's more, precise estimation of head poses based on 3D face structure is employed to rectify the 3D pupil centers and eye contours for improving the ability of tolerance to head movements. Experiments on fifteen subjects show that the system is accurate and allows natural head movements.",
"fno": "5209b156",
"keywords": [
"Three Dimensional Displays",
"Estimation",
"Face",
"Calibration",
"Feature Extraction",
"Solid Modeling",
"Head Pose",
"Gaze Estimation",
"3 D Face Structure"
],
"authors": [
{
"affiliation": null,
"fullName": "Chunshui Xiong",
"givenName": "Chunshui",
"surname": "Xiong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lei Huang",
"givenName": "Lei",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Changping Liu",
"givenName": "Changping",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1156-1161",
"year": "2014",
"issn": "1051-4651",
"isbn": "978-1-4799-5209-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5209b150",
"articleId": "12OmNzUgd48",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5209b162",
"articleId": "12OmNBuL1hu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209b162",
"title": "3-D Gaze Tracking Using Pupil Contour Features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b162/12OmNBuL1hu",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b003",
"title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a606",
"title": "Eye-Model-Based Gaze Estimation by RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a606/12OmNyqiaTI",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a792",
"title": "Person-Independent 3D Gaze Estimation Using Face Frontalization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a792/12OmNzYwbWh",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733c299",
"title": "It’s Written All Over Your Face: Full-Face Appearance-Based Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733c299/12OmNzaQoPr",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926555",
"title": "Gaze Tracking and Object Recognition from Eye Images",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926555/12OmNzvz6Lc",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a449",
"title": "A Novel Remote Eye Gaze Tracking System Using Line Illumination Sources",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a449/19wB6HJrVmM",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900e977",
"title": "Learning-by-Novel-View-Synthesis for Full-Face Appearance-Based 3D Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900e977/1G56e2q120w",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c182",
"title": "Dynamic 3D Gaze from Afar: Deep Gaze Estimation from Temporal Eye-Head-Body Coordination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c182/1H1mDm1L85i",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a375",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a375/1JrQRCijhMk",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzVXNJ6",
"title": "Applications of Computer Vision, IEEE Workshop on",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwE9OwM",
"doi": "10.1109/WACV.2013.6475042",
"title": "Unwrapping the eye for visible-spectrum gaze tracking on wearable devices",
"normalizedTitle": "Unwrapping the eye for visible-spectrum gaze tracking on wearable devices",
"abstract": "Wearable devices with gaze tracking can assist users in many daily-life tasks. When used for extended periods of time, it is desirable that such devices do not employ active illumination for safety reasons and to minimize interference from other light sources such as the sun. Most non active-illumination methods for gaze tracking attempt to locate the iris contour by fitting an ellipse. Although the camera projection causes the iris to appear as an ellipse in the eye image, it is actually a circle on the eye surface. Instead of searching for an ellipse in the eye image, the method proposed in this paper searches for a circle on the eye surface. To this end, the method calibrates a three-dimensional eye model based on the location of the corners of the eye. Using the 3D eye model, an input image is first transformed so that the eye's spherical surface is warped into a plane, thus “unwrapping” the eye. The iris circle is then detected on the unwrapped image by a three-step robust circle-fitting procedure. The location of the circle corresponds to the gaze orientation on the outside image. The method is fast to calibrate and runs in realtime. Extensive experimentation on embedded hardware and comparisons with alternative methods demonstrate the effectiveness of the proposed solution.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wearable devices with gaze tracking can assist users in many daily-life tasks. When used for extended periods of time, it is desirable that such devices do not employ active illumination for safety reasons and to minimize interference from other light sources such as the sun. Most non active-illumination methods for gaze tracking attempt to locate the iris contour by fitting an ellipse. Although the camera projection causes the iris to appear as an ellipse in the eye image, it is actually a circle on the eye surface. Instead of searching for an ellipse in the eye image, the method proposed in this paper searches for a circle on the eye surface. To this end, the method calibrates a three-dimensional eye model based on the location of the corners of the eye. Using the 3D eye model, an input image is first transformed so that the eye's spherical surface is warped into a plane, thus “unwrapping” the eye. The iris circle is then detected on the unwrapped image by a three-step robust circle-fitting procedure. The location of the circle corresponds to the gaze orientation on the outside image. The method is fast to calibrate and runs in realtime. Extensive experimentation on embedded hardware and comparisons with alternative methods demonstrate the effectiveness of the proposed solution.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wearable devices with gaze tracking can assist users in many daily-life tasks. When used for extended periods of time, it is desirable that such devices do not employ active illumination for safety reasons and to minimize interference from other light sources such as the sun. Most non active-illumination methods for gaze tracking attempt to locate the iris contour by fitting an ellipse. Although the camera projection causes the iris to appear as an ellipse in the eye image, it is actually a circle on the eye surface. Instead of searching for an ellipse in the eye image, the method proposed in this paper searches for a circle on the eye surface. To this end, the method calibrates a three-dimensional eye model based on the location of the corners of the eye. Using the 3D eye model, an input image is first transformed so that the eye's spherical surface is warped into a plane, thus “unwrapping” the eye. The iris circle is then detected on the unwrapped image by a three-step robust circle-fitting procedure. The location of the circle corresponds to the gaze orientation on the outside image. The method is fast to calibrate and runs in realtime. Extensive experimentation on embedded hardware and comparisons with alternative methods demonstrate the effectiveness of the proposed solution.",
"fno": "06475042",
"keywords": [
"Iris",
"Cameras",
"Robustness",
"Feature Extraction",
"Solid Modeling",
"Transforms",
"Calibration"
],
"authors": [
{
"affiliation": "The Robotics Institute, Carnegie Mellon University, 500 Forbes Ave. Pittsburgh, PA USA",
"fullName": "Bernardo Rodrigues Pires",
"givenName": "Bernardo Rodrigues",
"surname": "Pires",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Robotics Institute, Carnegie Mellon University, 500 Forbes Ave. Pittsburgh, PA USA",
"fullName": "Michael Devyver",
"givenName": "Michael",
"surname": "Devyver",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Robotics Institute, Carnegie Mellon University, 500 Forbes Ave. Pittsburgh, PA USA",
"fullName": "Akihiro Tsukada",
"givenName": "Akihiro",
"surname": "Tsukada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Robotics Institute, Carnegie Mellon University, 500 Forbes Ave. Pittsburgh, PA USA",
"fullName": "Takeo Kanade",
"givenName": "Takeo",
"surname": "Kanade",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-01-01T00:00:00",
"pubType": "proceedings",
"pages": "369-376",
"year": "2013",
"issn": "1550-5790",
"isbn": "978-1-4673-5053-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06475043",
"articleId": "12OmNBU1jId",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06475041",
"articleId": "12OmNyoiYZx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032b003",
"title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2003/1950/1/195010136",
"title": "Eye Gaze Estimation from a Single Image of One Eye",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2003/195010136/12OmNyv7mcM",
"parentPublication": {
"id": "proceedings/iccv/2003/1950/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130505",
"title": "Illumination-free gaze estimation method for first-person vision wearable device",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130505/12OmNzUPpfB",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2010/4217/0/05693863",
"title": "Point-of-Regard Measurement via Iris Contour with One Eye from Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2010/05693863/12OmNzYwcfJ",
"parentPublication": {
"id": "proceedings/ism/2010/4217/0",
"title": "2010 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08818661",
"title": "Realtime and Accurate 3D Eye Gaze Capture with DCNN-Based Iris and Pupil Segmentation",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08818661/1cRBtd0YTN6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a655",
"title": "A Multi-Modal Gaze Tracking Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a655/1ehBL8sk06I",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d698",
"title": "RITnet: Real-time Semantic Segmentation of the Eye for Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d698/1i5mpj3Jp3W",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389650",
"title": "EllSeg: An Ellipse Segmentation Framework for Robust Gaze Tracking",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389650/1smZUThnFi8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700a011",
"title": "Subject Guided Eye Image Synthesis with Application to Gaze Redirection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700a011/1uqGyw32uVq",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a011",
"title": "Edge-Guided Near-Eye Image Analysis for Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a011/1yeCW4N7Y9a",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwJPMXt",
"title": "Optoelectronics and Image Processing, International Conference on",
"acronym": "icoip",
"groupId": "1800228",
"volume": "1",
"displayVolume": "1",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwqft0F",
"doi": "10.1109/ICOIP.2010.346",
"title": "A Simplified 3D Gaze Tracking Technology with Stereo Vision",
"normalizedTitle": "A Simplified 3D Gaze Tracking Technology with Stereo Vision",
"abstract": "A simplified 3D gaze tracking technology with stereo vision has been developed in this paper. A pair of stereo cameras and two point light sources are used to estimate 3D gaze of user's eye. Compared with other 3D systems, there are two improvements to simplify the whole system in this paper: First, 3D gaze is estimated as the line linking 3D cornea center and the virtual image of pupil center using the first Purkinje images of two LEDs, and no any user-dependent parameters are used in our paper, which require a complicated calibration procedure for each user. Second, to compute the gaze point on screen by intersecting the estimated 3D gaze, the positions of LEDs and the monitor need to be calibrated in advance. By introducing a planar mirror, two LEDs and the monitor are calibrated using a simple method without any other devices. Experimental results show that our gaze tracking system achieves an average estimation error of about 3 degree.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A simplified 3D gaze tracking technology with stereo vision has been developed in this paper. A pair of stereo cameras and two point light sources are used to estimate 3D gaze of user's eye. Compared with other 3D systems, there are two improvements to simplify the whole system in this paper: First, 3D gaze is estimated as the line linking 3D cornea center and the virtual image of pupil center using the first Purkinje images of two LEDs, and no any user-dependent parameters are used in our paper, which require a complicated calibration procedure for each user. Second, to compute the gaze point on screen by intersecting the estimated 3D gaze, the positions of LEDs and the monitor need to be calibrated in advance. By introducing a planar mirror, two LEDs and the monitor are calibrated using a simple method without any other devices. Experimental results show that our gaze tracking system achieves an average estimation error of about 3 degree.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A simplified 3D gaze tracking technology with stereo vision has been developed in this paper. A pair of stereo cameras and two point light sources are used to estimate 3D gaze of user's eye. Compared with other 3D systems, there are two improvements to simplify the whole system in this paper: First, 3D gaze is estimated as the line linking 3D cornea center and the virtual image of pupil center using the first Purkinje images of two LEDs, and no any user-dependent parameters are used in our paper, which require a complicated calibration procedure for each user. Second, to compute the gaze point on screen by intersecting the estimated 3D gaze, the positions of LEDs and the monitor need to be calibrated in advance. By introducing a planar mirror, two LEDs and the monitor are calibrated using a simple method without any other devices. Experimental results show that our gaze tracking system achieves an average estimation error of about 3 degree.",
"fno": "4252a131",
"keywords": [
"Gaze Tracking",
"3 D Gaze",
"Stereo Cameras",
"Human Computer Interaction"
],
"authors": [
{
"affiliation": null,
"fullName": "Ke Zhang",
"givenName": "Ke",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xinbo Zhao",
"givenName": "Xinbo",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhong Ma",
"givenName": "Zhong",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yi Man",
"givenName": "Yi",
"surname": "Man",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icoip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "131-134",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4252-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4252a127",
"articleId": "12OmNwBBqe4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4252a135",
"articleId": "12OmNqJq4BI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dimpvt/2011/4369/0/4369a057",
"title": "Scene Segmentation Assisted by Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2011/4369a057/12OmNASraUu",
"parentPublication": {
"id": "proceedings/3dimpvt/2011/4369/0",
"title": "2011 International Conference on 3D Imaging, Modeling, Processing, Visualization and Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/04155710",
"title": "Gaze Tracking by Using Factorized Likelihoods Particle Filtering and Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/04155710/12OmNx7G625",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2003/1900/2/190020451",
"title": "Eye Gaze Tracking Using an Active Stereo Head",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2003/190020451/12OmNxRWI2Y",
"parentPublication": {
"id": "proceedings/cvpr/2003/1900/2",
"title": "2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ratfg-rts/1999/0378/0/03780077",
"title": "Real-Time Stereo Face Tracking System for Visual Human Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ratfg-rts/1999/03780077/12OmNxbEtJK",
"parentPublication": {
"id": "proceedings/ratfg-rts/1999/0378/0",
"title": "Recognition, Analysis, & Tracking of Faces & Gestures in Real -Time Systems, IEEE ICCV Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbit/2007/2999/0/29990617",
"title": "3D Gaze Tracking and Analysis for Attentive Human Computer Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/fbit/2007/29990617/12OmNy2agTG",
"parentPublication": {
"id": "proceedings/fbit/2007/2999/0",
"title": "2007 Frontiers in the Convergence of Bioscience and Information Technologies (FBIT '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfcc/2009/3591/0/3591a487",
"title": "Stereo Vision Tracking System",
"doi": null,
"abstractUrl": "/proceedings-article/icfcc/2009/3591a487/12OmNyjccz5",
"parentPublication": {
"id": "proceedings/icfcc/2009/3591/0",
"title": "2009 International Conference on Future Computer and Communication (ICFCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504694",
"title": "Gaze prediction using machine learning for dynamic stereo manipulation in games",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504694/12OmNzWx00N",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020101",
"title": "Non-Contact Eye Gaze Tracking System by Mapping of Corneal Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020101/12OmNzgwmIY",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a117",
"title": "Optical Gaze Tracking with Spatially-Sparse Single-Pixel Detectors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a117/1pyswxBB73y",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrQRCijhMk",
"doi": "10.1109/ISMAR55827.2022.00053",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"normalizedTitle": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"abstract": "Eye tracking has already made its way to current commercial wearable display devices, and is becoming increasingly important for virtual and augmented reality applications. However, the existing model-based eye tracking solutions are not capable of conducting very accurate gaze angle measurements, and may not be sufficient to solve challenging display problems such as pupil steering or eyebox expansion. In this paper, we argue that accurate detection and localization of pupil in 3D space is a necessary intermediate step in model-based eye tracking. Existing methods and datasets either ignore evaluating the accuracy of 3D pupil localization or evaluate it only on synthetic data. To this end, we capture the first 3D pupilgaze-measurement dataset using a high precision setup with head stabilization and release it as the first benchmark dataset to evaluate both 3D pupil localization and gaze tracking methods. Furthermore, we utilize an advanced eye model to replace the commonly used oversimplified eye model. Leveraging the eye model, we propose a novel 3D pupil localization method with a deep learning-based corneal refraction correction. We demonstrate that our method outperforms the state-of-the-art works by reducing the 3D pupil localization error by 47.5% and the gaze estimation error by 18.7%. Our dataset and codes can be found here: link.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Eye tracking has already made its way to current commercial wearable display devices, and is becoming increasingly important for virtual and augmented reality applications. However, the existing model-based eye tracking solutions are not capable of conducting very accurate gaze angle measurements, and may not be sufficient to solve challenging display problems such as pupil steering or eyebox expansion. In this paper, we argue that accurate detection and localization of pupil in 3D space is a necessary intermediate step in model-based eye tracking. Existing methods and datasets either ignore evaluating the accuracy of 3D pupil localization or evaluate it only on synthetic data. To this end, we capture the first 3D pupilgaze-measurement dataset using a high precision setup with head stabilization and release it as the first benchmark dataset to evaluate both 3D pupil localization and gaze tracking methods. Furthermore, we utilize an advanced eye model to replace the commonly used oversimplified eye model. Leveraging the eye model, we propose a novel 3D pupil localization method with a deep learning-based corneal refraction correction. We demonstrate that our method outperforms the state-of-the-art works by reducing the 3D pupil localization error by 47.5% and the gaze estimation error by 18.7%. Our dataset and codes can be found here: link.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Eye tracking has already made its way to current commercial wearable display devices, and is becoming increasingly important for virtual and augmented reality applications. However, the existing model-based eye tracking solutions are not capable of conducting very accurate gaze angle measurements, and may not be sufficient to solve challenging display problems such as pupil steering or eyebox expansion. In this paper, we argue that accurate detection and localization of pupil in 3D space is a necessary intermediate step in model-based eye tracking. Existing methods and datasets either ignore evaluating the accuracy of 3D pupil localization or evaluate it only on synthetic data. To this end, we capture the first 3D pupilgaze-measurement dataset using a high precision setup with head stabilization and release it as the first benchmark dataset to evaluate both 3D pupil localization and gaze tracking methods. Furthermore, we utilize an advanced eye model to replace the commonly used oversimplified eye model. Leveraging the eye model, we propose a novel 3D pupil localization method with a deep learning-based corneal refraction correction. We demonstrate that our method outperforms the state-of-the-art works by reducing the 3D pupil localization error by 47.5% and the gaze estimation error by 18.7%. Our dataset and codes can be found here: link.",
"fno": "532500a375",
"keywords": [
"Augmented Reality",
"Computer Displays",
"Deep Learning Artificial Intelligence",
"Eye",
"Gaze Tracking",
"Human Computer Interaction",
"3 D Pupil Localization Error",
"3 D Pupil Localization Method",
"3 D Pupilgaze Measurement Dataset",
"Accurate Gaze Angle Measurements",
"Advanced Eye Model",
"Anatomical Eye Model",
"Augmented Reality Applications",
"Commercial Wearable Display Devices",
"Deep Learning Based Corneal Refraction Correction",
"Gaze Estimation Error",
"Gaze Tracking Methods",
"Model Based Eye Tracking Solutions",
"Neural 3 D Gaze",
"Neural Refraction Correction",
"Oversimplified Eye Model",
"Pupil Steering",
"Virtual Reality Applications",
"Location Awareness",
"Solid Modeling",
"Analytical Models",
"Three Dimensional Displays",
"Head",
"Gaze Tracking",
"Benchmark Testing",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Mixed Augmented Reality",
"Artificial Intelligence",
"Computer Vision",
"Computer Vision Problems"
],
"authors": [
{
"affiliation": "UNC Chapel Hill",
"fullName": "Conny Lu",
"givenName": "Conny",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Princeton University",
"fullName": "Praneeth Chakravarthula",
"givenName": "Praneeth",
"surname": "Chakravarthula",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UNC Chapel Hill",
"fullName": "Kaihao Liu",
"givenName": "Kaihao",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UNC Chapel Hill",
"fullName": "Xixiang Liu",
"givenName": "Xixiang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UNC Chapel Hill",
"fullName": "Siyuan Li",
"givenName": "Siyuan",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UNC Chapel Hill",
"fullName": "Henry Fuchs",
"givenName": "Henry",
"surname": "Fuchs",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "375-383",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a365",
"articleId": "1JrRb1CKQtq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a384",
"articleId": "1JrQZPMRLW0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/gcis/2009/3571/2/3571b133",
"title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571b133/12OmNA0vo1q",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/2",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b162",
"title": "3-D Gaze Tracking Using Pupil Contour Features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b162/12OmNBuL1hu",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b156",
"title": "Gaze Estimation Based on 3D Face Structure and Pupil Centers",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b156/12OmNvCi45y",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/1/4252a131",
"title": "A Simplified 3D Gaze Tracking Technology with Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a131/12OmNwqft0F",
"parentPublication": {
                "id": "proceedings/icoip/2010/4252/1",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d162",
"title": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d162/12OmNxbmSBT",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020101",
"title": "Non-Contact Eye Gaze Tracking System by Mapping of Corneal Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020101/12OmNzgwmIY",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2015/1986/0/1986a176",
"title": "Mobile 3D Gaze Tracking Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a176/12OmNzzxusS",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956312",
"title": "A Joint Cascaded Framework for Simultaneous Eye State, Eye Center, and Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956312/1IHq8em8jug",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08818661",
"title": "Realtime and Accurate 3D Eye Gaze Capture with DCNN-Based Iris and Pupil Segmentation",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08818661/1cRBtd0YTN6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389490",
"title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1ehBy9p57Q4",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"acronym": "ithings-greencom-cpscom-smartdata",
"groupId": "1800308",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1ehBL8sk06I",
"doi": "10.1109/iThings/GreenCom/CPSCom/SmartData.2019.00126",
"title": "A Multi-Modal Gaze Tracking Algorithm",
"normalizedTitle": "A Multi-Modal Gaze Tracking Algorithm",
"abstract": "Gaze tracking is an assistant system of human-computer interaction. Aiming at the problem of high misjudgment rate and long time-consuming of traditional iris location methods, this paper proposes a gaze tracking method based on human eye geometric characteristics to improve the tracking accuracy in 2D environment. Firstly, the human face is located by face location algorithm and the position of human eye is estimated roughly. Then the iris template is built by iris image, and the iris center location algorithm is used to locate the iris center position. Finally, the eyes corners and iris center points are extracted to locate the eye area accurately and obtain the binocular image. The binocular images are input into the feature extraction network as multi-modal information in parallel, and the convoluted feature channels are reconstructed using the weight redistribution module in the network. Then the reconstructed features are fused in the full connection layer. Finally, the output layer is used to classify the reconstructed features. Experiments were carried out on a self-built screen block data set. For 12 classified data, the lowest recognition error rate is 5.34%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gaze tracking is an assistant system of human-computer interaction. Aiming at the problem of high misjudgment rate and long time-consuming of traditional iris location methods, this paper proposes a gaze tracking method based on human eye geometric characteristics to improve the tracking accuracy in 2D environment. Firstly, the human face is located by face location algorithm and the position of human eye is estimated roughly. Then the iris template is built by iris image, and the iris center location algorithm is used to locate the iris center position. Finally, the eyes corners and iris center points are extracted to locate the eye area accurately and obtain the binocular image. The binocular images are input into the feature extraction network as multi-modal information in parallel, and the convoluted feature channels are reconstructed using the weight redistribution module in the network. Then the reconstructed features are fused in the full connection layer. Finally, the output layer is used to classify the reconstructed features. Experiments were carried out on a self-built screen block data set. For 12 classified data, the lowest recognition error rate is 5.34%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gaze tracking is an assistant system of human-computer interaction. Aiming at the problem of high misjudgment rate and long time-consuming of traditional iris location methods, this paper proposes a gaze tracking method based on human eye geometric characteristics to improve the tracking accuracy in 2D environment. Firstly, the human face is located by face location algorithm and the position of human eye is estimated roughly. Then the iris template is built by iris image, and the iris center location algorithm is used to locate the iris center position. Finally, the eyes corners and iris center points are extracted to locate the eye area accurately and obtain the binocular image. The binocular images are input into the feature extraction network as multi-modal information in parallel, and the convoluted feature channels are reconstructed using the weight redistribution module in the network. Then the reconstructed features are fused in the full connection layer. Finally, the output layer is used to classify the reconstructed features. Experiments were carried out on a self-built screen block data set. For 12 classified data, the lowest recognition error rate is 5.34%.",
"fno": "298000a655",
"keywords": [
"Eye",
"Face Recognition",
"Feature Extraction",
"Human Computer Interaction",
"Image Classification",
"Image Reconstruction",
"Iris Recognition",
"Multimodal Gaze Tracking Algorithm",
"Assistant System",
"Human Computer Interaction",
"Human Eye Geometric Characteristics",
"Tracking Accuracy",
"Human Face",
"Face Location Algorithm",
"Iris Template",
"Iris Image",
"Iris Center Location Algorithm",
"Iris Center Position",
"Eyes Corners",
"Eye Area",
"Binocular Image",
"Feature Extraction Network",
"Multimodal Information",
"Convoluted Feature Channels",
"2 D Environment",
"Iris Center Point Extraction",
"Weight Redistribution Module",
"Full Connection Layer",
"Reconstructed Feature Classification",
"Self Built Screen Block Data Set",
"Feature Extraction",
"Iris Recognition",
"Face",
"Iris",
"Gaze Tracking",
"Image Reconstruction",
"Gaze Tracking",
"Multi Modal",
"Geometric Characteristics"
],
"authors": [
{
"affiliation": "Changzhou University",
"fullName": "Haiming Su",
"givenName": "Haiming",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Changzhou University & Jiangsu Province Networking and Mobile Internet Technology Engineering Key Laboratory",
"fullName": "Zhenjie Hou",
"givenName": "Zhenjie",
"surname": "Hou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Changzhou University",
"fullName": "Juan Huan",
"givenName": "Juan",
"surname": "Huan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Changzhou University",
"fullName": "Ke Yan",
"givenName": "Ke",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Changzhou University",
"fullName": "Hao Ding",
"givenName": "Hao",
"surname": "Ding",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ithings-greencom-cpscom-smartdata",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "655-660",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-2980-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "298000a649",
"articleId": "1ehBE7SrRkI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "298000a661",
"articleId": "1ehBJBERKOQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-graphics/2015/8020/0/07450411",
"title": "Real Time Learning Evaluation Based on Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450411/12OmNBNM8QX",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06475042",
"title": "Unwrapping the eye for visible-spectrum gaze tracking on wearable devices",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06475042/12OmNwE9OwM",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798273",
"title": "Required Accuracy of Gaze Tracking for Varifocal Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798273/1cJ0T4CUJTq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08818661",
"title": "Realtime and Accurate 3D Eye Gaze Capture with DCNN-Based Iris and Pupil Segmentation",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08818661/1cRBtd0YTN6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998133",
"title": "The Security-Utility Trade-off for Iris Authentication and Eye Animation for Social Virtual Avatars",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998133/1hrXcnyAOzu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d660",
"title": "U2Eyes: A Binocular Dataset for Eye Tracking and Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d660/1i5mrEVhtbq",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389650",
"title": "EllSeg: An Ellipse Segmentation Framework for Robust Gaze Tracking",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389650/1smZUThnFi8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412205",
"title": "Adaptive Feature Fusion Network for Gaze Tracking in Mobile Tablets",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412205/1tmjcNMinsc",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a011",
"title": "Edge-Guided Near-Eye Image Analysis for Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a011/1yeCW4N7Y9a",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeD3XlUpBS",
"doi": "10.1109/ISMAR52148.2021.00053",
"title": "TEyeD: Over 20 Million Real-World Eye Images with Pupil, Eyelid, and Iris 2D and 3D Segmentations, 2D and 3D Landmarks, 3D Eyeball, Gaze Vector, and Eye Movement Types",
"normalizedTitle": "TEyeD: Over 20 Million Real-World Eye Images with Pupil, Eyelid, and Iris 2D and 3D Segmentations, 2D and 3D Landmarks, 3D Eyeball, Gaze Vector, and Eye Movement Types",
"abstract": "We present TEyeD, the world’s largest unified public data set of eye images taken with head-mounted devices. TEyeD was acquired with seven different head-mounted eye trackers. Among them, two eye trackers were integrated into virtual reality (VR) or augmented reality (AR) devices. The images in TEyeD were obtained from various tasks, including car rides, simulator rides, outdoor sports activities, and daily indoor activities. The data set includes 2D&3D landmarks, semantic segmentation, 3D eyeball annotation and the gaze vector and eye movement types for all images. Landmarks and semantic segmentation are provided for the pupil, iris and eyelids. Video lengths vary from a few minutes to several hours. With more than 20 million carefully annotated images, TEyeD provides a unique, coherent resource and a valuable foundation for advancing research in the field of computer vision, eye tracking and gaze estimation in modern VR and AR applications. Data and code at DOWNLOAD LINK.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present TEyeD, the world’s largest unified public data set of eye images taken with head-mounted devices. TEyeD was acquired with seven different head-mounted eye trackers. Among them, two eye trackers were integrated into virtual reality (VR) or augmented reality (AR) devices. The images in TEyeD were obtained from various tasks, including car rides, simulator rides, outdoor sports activities, and daily indoor activities. The data set includes 2D&3D landmarks, semantic segmentation, 3D eyeball annotation and the gaze vector and eye movement types for all images. Landmarks and semantic segmentation are provided for the pupil, iris and eyelids. Video lengths vary from a few minutes to several hours. With more than 20 million carefully annotated images, TEyeD provides a unique, coherent resource and a valuable foundation for advancing research in the field of computer vision, eye tracking and gaze estimation in modern VR and AR applications. Data and code at DOWNLOAD LINK.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present TEyeD, the world’s largest unified public data set of eye images taken with head-mounted devices. TEyeD was acquired with seven different head-mounted eye trackers. Among them, two eye trackers were integrated into virtual reality (VR) or augmented reality (AR) devices. The images in TEyeD were obtained from various tasks, including car rides, simulator rides, outdoor sports activities, and daily indoor activities. The data set includes 2D&3D landmarks, semantic segmentation, 3D eyeball annotation and the gaze vector and eye movement types for all images. Landmarks and semantic segmentation are provided for the pupil, iris and eyelids. Video lengths vary from a few minutes to several hours. With more than 20 million carefully annotated images, TEyeD provides a unique, coherent resource and a valuable foundation for advancing research in the field of computer vision, eye tracking and gaze estimation in modern VR and AR applications. Data and code at DOWNLOAD LINK.",
"fno": "015800a367",
"keywords": [
"Augmented Reality",
"Computer Vision",
"Eye",
"Gaze Tracking",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Image Segmentation",
"Sport",
"Virtual Reality",
"Gaze Vector",
"Eye Movement Types",
"Semantic Segmentation",
"Eyelids",
"20 Million Carefully Annotated Images",
"T Eye D",
"Eye Tracking",
"Gaze Estimation",
"20 Million Real World Eye Images",
"Iris 2 D",
"3 D Segmentations",
"Head Mounted Devices",
"Seven Different Head Mounted Eye Trackers",
"Outdoor Sports Activities",
"Daily Indoor Activities",
"Data Set",
"3 D Eyeball Annotation",
"Image Segmentation",
"Iris",
"Computer Vision",
"Three Dimensional Displays",
"Annotations",
"Tracking",
"Semantics",
"Computing Methodologies",
"Artificial Intelligence",
"Computer Vision",
"Computer Vision Problems",
"Machine Learning General And Reference",
"Document Types",
"Surveys And Overviews"
],
"authors": [
{
"affiliation": "University,Human Computer Interaction,Tübingen",
"fullName": "Wolfgang Fuhl",
"givenName": "Wolfgang",
"surname": "Fuhl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Data Science & Analytics, University,Tübingen",
"fullName": "Gjergji Kasneci",
"givenName": "Gjergji",
"surname": "Kasneci",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University,Human Computer Interaction,Tübingen",
"fullName": "Enkelejda Kasneci",
"givenName": "Enkelejda",
"surname": "Kasneci",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "367-375",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "015800a357",
"articleId": "1yeD49qIeK4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a376",
"articleId": "1yeCUnrXwiI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032b003",
"title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543784",
"title": "Viewing direction estimation based on 3D eyeball construction for HRI",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543784/12OmNy3AgtA",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2017/4941/0/07912208",
"title": "Gaze Estimation Based on Eyeball-Head Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2017/07912208/12OmNzWfoVQ",
"parentPublication": {
"id": "proceedings/wacvw/2017/4941/0",
"title": "2017 IEEE Winter Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c221",
"title": "Unraveling Human Perception of Facial Aging Using Eye Gaze",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c221/17D45WwsQ8l",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a375",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a375/1JrQRCijhMk",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08818661",
"title": "Realtime and Accurate 3D Eye Gaze Capture with DCNN-Based Iris and Pupil Segmentation",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08818661/1cRBtd0YTN6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389490",
"title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412066",
"title": "Detection and Correspondence Matching of Corneal Reflections for Eye Tracking Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412066/1tmjH1aA4dG",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700a011",
"title": "Subject Guided Eye Image Synthesis with Application to Gaze Redirection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700a011/1uqGyw32uVq",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwB2dUd",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwF0BS2",
"doi": "10.1109/3DUI.2016.7460041",
"title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"normalizedTitle": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"abstract": "The objective of this research is to improve the accessibility of Head-Mounted Displays (HMDs) for users with balance impairments while they are in immersive Virtual Environments (VEs). Previous research has shown that most users experience some imbalance in a fully immersive VE (i.e., wearing a HMD that blocks the user's view of the real world). However, this imbalance is significantly worse in users with balance deficits. Persons with balance impairments often depend more on visual feedback than persons without impairment to maintain their balance. Thus, this research aims to determine an effective visual feedback technique to improve balance of persons while using VEs to improve the accessibility of HMDs. In order to do that, we conducted a study with seven users without impairment and seven users with balance impairments due to Multiple Sclerosis (MS). In the study, each user wore a tracked head-mounted display while they played a balance game in which they had to dodge a series of virtual tennis balls thrown at them while standing on a Wii balance board. We investigated how a static rest frame (SRF) (e.g., a cross-hair always rendered in the same position on the user's display screen) impacts the participants' balances in VR. Results indicate that a SRF significantly improves balance in VR for users with MS. Based on these results, we propose guidelines for designing more accessible VEs for persons with balance impairments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The objective of this research is to improve the accessibility of Head-Mounted Displays (HMDs) for users with balance impairments while they are in immersive Virtual Environments (VEs). Previous research has shown that most users experience some imbalance in a fully immersive VE (i.e., wearing a HMD that blocks the user's view of the real world). However, this imbalance is significantly worse in users with balance deficits. Persons with balance impairments often depend more on visual feedback than persons without impairment to maintain their balance. Thus, this research aims to determine an effective visual feedback technique to improve balance of persons while using VEs to improve the accessibility of HMDs. In order to do that, we conducted a study with seven users without impairment and seven users with balance impairments due to Multiple Sclerosis (MS). In the study, each user wore a tracked head-mounted display while they played a balance game in which they had to dodge a series of virtual tennis balls thrown at them while standing on a Wii balance board. We investigated how a static rest frame (SRF) (e.g., a cross-hair always rendered in the same position on the user's display screen) impacts the participants' balances in VR. Results indicate that a SRF significantly improves balance in VR for users with MS. Based on these results, we propose guidelines for designing more accessible VEs for persons with balance impairments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The objective of this research is to improve the accessibility of Head-Mounted Displays (HMDs) for users with balance impairments while they are in immersive Virtual Environments (VEs). Previous research has shown that most users experience some imbalance in a fully immersive VE (i.e., wearing a HMD that blocks the user's view of the real world). However, this imbalance is significantly worse in users with balance deficits. Persons with balance impairments often depend more on visual feedback than persons without impairment to maintain their balance. Thus, this research aims to determine an effective visual feedback technique to improve balance of persons while using VEs to improve the accessibility of HMDs. In order to do that, we conducted a study with seven users without impairment and seven users with balance impairments due to Multiple Sclerosis (MS). In the study, each user wore a tracked head-mounted display while they played a balance game in which they had to dodge a series of virtual tennis balls thrown at them while standing on a Wii balance board. We investigated how a static rest frame (SRF) (e.g., a cross-hair always rendered in the same position on the user's display screen) impacts the participants' balances in VR. Results indicate that a SRF significantly improves balance in VR for users with MS. Based on these results, we propose guidelines for designing more accessible VEs for persons with balance impairments.",
"fno": "07460041",
"keywords": [
"Visualization",
"Atmospheric Measurements",
"Particle Measurements",
"Games",
"Legged Locomotion",
"Virtual Reality",
"Electronic Mail",
"Head Mounted Display",
"Virtual Reality",
"Balance",
"Accessibility"
],
"authors": [
{
"affiliation": "University of Texas at San Antonio",
"fullName": "Sharif Mohammad Shahnewaz Ferdous",
"givenName": "Sharif Mohammad Shahnewaz",
"surname": "Ferdous",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at San Antonio",
"fullName": "Imtiaz Muhammad Arafat",
"givenName": "Imtiaz Muhammad",
"surname": "Arafat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at San Antonio",
"fullName": "John Quarles",
"givenName": "John",
"surname": "Quarles",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "121-128",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0842-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07460040",
"articleId": "12OmNBqdr48",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07460042",
"articleId": "12OmNqAU6oL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pive/2012/1218/0/06229792",
"title": "Differences in presence between healthy users and users with multiple sclerosis",
"doi": null,
"abstractUrl": "/proceedings-article/pive/2012/06229792/12OmNBSSV9H",
"parentPublication": {
"id": "proceedings/pive/2012/1218/0",
"title": "2012 IEEE VR Workshop on Perceptual Illusions in Virtual Environments",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550192",
"title": "Latency and avatars in Virtual Environments and the effects on gait for persons with mobility impairments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550192/12OmNBkP3y4",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2016/3834/0/3834a282",
"title": "Dynamic and Static Balance in Persons with Different Arch Height and Impacts of an Arch Support",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2016/3834a282/12OmNCwUmAP",
"parentPublication": {
"id": "proceedings/bibe/2016/3834/0",
"title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2017/5812/0/08056608",
                    "title": "Integrating fall-risk assessments within a simple balance exergame",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056608/12OmNvlg8h7",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802059",
"title": "A unique way to increase presence of mobility impaired users — Increasing confidence in balance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802059/12OmNxGAL3n",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2016/3834/0/3834a278",
"title": "The Balance Ability of Fallers and Non-Fallers in Psychiatric Patients at a Long Term Care Unit",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2016/3834a278/12OmNxVlTBL",
"parentPublication": {
"id": "proceedings/bibe/2016/3834/0",
"title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892356",
"title": "Improve accessibility of virtual and augmented reality for people with balance impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892356/12OmNxYL5bz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504764",
"title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504764/12OmNy6qfPt",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446488",
"title": "Investigating the Reason for Increased Postural Instability in Virtual Reality for Persons with Balance Impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446488/13bd1gJ1v0N",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a782",
"title": "Auditory Feedback for Standing Balance Improvement in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a782/1CJc6WO5JBu",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
                        "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyoSbiA",
"title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)",
"acronym": "bibe",
"groupId": "1000075",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxVlTBL",
"doi": "10.1109/BIBE.2016.58",
"title": "The Balance Ability of Fallers and Non-Fallers in Psychiatric Patients at a Long Term Care Unit",
"normalizedTitle": "The Balance Ability of Fallers and Non-Fallers in Psychiatric Patients at a Long Term Care Unit",
            "abstract": "With the increasing age, chronicity of illness, and medication side-effects, the psychiatric patients tend to have higher incidence of comorbidity and rate of accidents. Falls is the most devastating accidents and the ability to maintain balance is pivotal for the well-beings of this population. It appeared that different patterns of fall risk factors may be evident in psychiatric patients but with few scientific evidence. The existing tools for evaluation of risk of falls were not applicable on this population. This study intended to quantitatively measure the balance ability in psychiatric patients with and without fall history. The results could further contribute to falls prevention in long term care unit.",
"abstracts": [
{
"abstractType": "Regular",
                    "content": "With the increasing age, chronicity of illness, and medication side-effects, the psychiatric patients tend to have higher incidence of comorbidity and rate of accidents. Falls is the most devastating accidents and the ability to maintain balance is pivotal for the well-beings of this population. It appeared that different patterns of fall risk factors may be evident in psychiatric patients but with few scientific evidence. The existing tools for evaluation of risk of falls were not applicable on this population. This study intended to quantitatively measure the balance ability in psychiatric patients with and without fall history. The results could further contribute to falls prevention in long term care unit.",
"__typename": "ArticleAbstractType"
}
],
            "normalizedAbstract": "With the increasing age, chronicity of illness, and medication side-effects, the psychiatric patients tend to have higher incidence of comorbidity and rate of accidents. Falls is the most devastating accidents and the ability to maintain balance is pivotal for the well-beings of this population. It appeared that different patterns of fall risk factors may be evident in psychiatric patients but with few scientific evidence. The existing tools for evaluation of risk of falls were not applicable on this population. This study intended to quantitatively measure the balance ability in psychiatric patients with and without fall history. The results could further contribute to falls prevention in long term care unit.",
"fno": "3834a278",
"keywords": [
"Legged Locomotion",
"Foot",
"Atmospheric Measurements",
"Particle Measurements",
"Hospitals",
"Sociology",
"Statistics",
"Co P",
"Balance Ability",
"Health Related Quality Of Life"
],
"authors": [
{
"affiliation": null,
"fullName": "San-Ping Wang",
"givenName": "San-Ping",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jen-Suh Chern",
"givenName": "Jen-Suh",
"surname": "Chern",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jer-Hao Chang",
"givenName": "Jer-Hao",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bo-Jian Wu",
"givenName": "Bo-Jian",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hsiao-Ju Sun",
"givenName": "Hsiao-Ju",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiunn-Ying Liou",
"givenName": "Jiunn-Ying",
"surname": "Liou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibe",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-10-01T00:00:00",
"pubType": "proceedings",
"pages": "278-281",
"year": "2016",
"issn": "2471-7819",
"isbn": "978-1-5090-3834-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3834a274",
"articleId": "12OmNxwncAG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3834a282",
"articleId": "12OmNCwUmAP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/percomw/2015/8425/0/07134077",
"title": "Investigation of gait characteristics in glaucoma patients with a shoe-integrated sensing system",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2015/07134077/12OmNrJAdQf",
"parentPublication": {
"id": "proceedings/percomw/2015/8425/0",
"title": "2015 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ngmast/2016/0949/0/07801465",
"title": "Immersive Virtual Reality as a Supplement in the Rehabilitation Program of Post-Stroke Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ngmast/2016/07801465/12OmNrMZpyR",
"parentPublication": {
"id": "proceedings/ngmast/2016/0949/0",
"title": "2016 10th International Conference on Next-Generation Mobile Applications, Security and Technologies (NGMAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc-scalcom/2015/7211/0/07518489",
"title": "Fall Detection Using Plantar Inclinometer Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom/2015/07518489/12OmNvrdI7N",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom/2015/7211/0",
"title": "2015 IEEE 12th Intl Conf on Ubiquitous Intelligence and Computing and 2015 IEEE 12th Intl Conf on Autonomic and Trusted Computing and 2015 IEEE 15th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scamc/1978/9999/0/00679911",
"title": "Computer Interview Problem Assessment Of Psychiatric Patients",
"doi": null,
"abstractUrl": "/proceedings-article/scamc/1978/00679911/12OmNwbukkG",
"parentPublication": {
"id": "proceedings/scamc/1978/9999/0",
"title": "1978 The Second Annual Symposium on Computer Application in Medical Care",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2013/4974/0/4974a684",
"title": "Medical Care Quality and Operating Performance of Taiwan's Psychiatric Hospitals -- BNV-DEA and CND-DEA Applications",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2013/4974a684/12OmNxw5Brv",
"parentPublication": {
"id": "proceedings/imis/2013/4974/0",
"title": "2013 Seventh International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2015/6564/2/6564b733",
"title": "A Novel Wireless System to Monitor Gait Using Smartshoe-Worn Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2015/6564b733/12OmNyQYtaE",
"parentPublication": {
"id": "proceedings/compsac/2015/6564/2",
"title": "2015 IEEE 39th Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0/07847145",
"title": "Fall Detection Based on Tilt Angle and Acceleration Variations",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2016/07847145/12OmNyYDDLx",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0",
"title": "2016 IEEE Trustcom/BigDataSE/ISPA",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitcs/2014/6541/0/07021769",
"title": "Evaluation of Sitting Balance in Adolescent Idiopathic Scoliosis Patients with Pelvic Rotation Using Balance Board System with Accelerometer",
"doi": null,
"abstractUrl": "/proceedings-article/icitcs/2014/07021769/12OmNzd7bvt",
"parentPublication": {
"id": "proceedings/icitcs/2014/6541/0",
"title": "2014 International Conference on IT Convergence and Security (ICITCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08758372",
"title": "Evaluating Balance Recovery Techniques for Users Wearing Head-Mounted Display in VR",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08758372/1bwCk2J4N7q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2019/1689/0/168900a434",
"title": "A Novel Two-Step Fall Detection Method Using Smartphone Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2019/168900a434/1cdOtfSl1Vm",
"parentPublication": {
"id": "proceedings/smartcomp/2019/1689/0",
"title": "2019 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy6qfPt",
"doi": "10.1109/VR.2016.7504764",
"title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"normalizedTitle": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"abstract": "The objective of this research is to improve the accessibility of Head-Mounted Displays (HMDs) for users with balance impairments while they are in immersive Virtual Environments (VEs). Previous research has shown that most users experience some imbalance in a fully immersive VE. However, this imbalance is significantly worse in users with balance deficits. Thus, this research aims to determine an effective visual feedback technique to improve balance of persons while using VEs to improve the accessibility of HMDs. In order to do that, we conducted a study with seven users without impairment and seven users with balance impairments due to Multiple Sclerosis (MS). We investigated how a static reference frame (SRF) (e.g., a cross-hair always rendered in the same position on the user's display screen) impacts the participants' balances in VR. Results indicate that a SRF significantly improves balance in VR for users with MS. Based on these results, we propose guidelines for designing more accessible VEs for persons with balance impairments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The objective of this research is to improve the accessibility of Head-Mounted Displays (HMDs) for users with balance impairments while they are in immersive Virtual Environments (VEs). Previous research has shown that most users experience some imbalance in a fully immersive VE. However, this imbalance is significantly worse in users with balance deficits. Thus, this research aims to determine an effective visual feedback technique to improve balance of persons while using VEs to improve the accessibility of HMDs. In order to do that, we conducted a study with seven users without impairment and seven users with balance impairments due to Multiple Sclerosis (MS). We investigated how a static reference frame (SRF) (e.g., a cross-hair always rendered in the same position on the user's display screen) impacts the participants' balances in VR. Results indicate that a SRF significantly improves balance in VR for users with MS. Based on these results, we propose guidelines for designing more accessible VEs for persons with balance impairments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The objective of this research is to improve the accessibility of Head-Mounted Displays (HMDs) for users with balance impairments while they are in immersive Virtual Environments (VEs). Previous research has shown that most users experience some imbalance in a fully immersive VE. However, this imbalance is significantly worse in users with balance deficits. Thus, this research aims to determine an effective visual feedback technique to improve balance of persons while using VEs to improve the accessibility of HMDs. In order to do that, we conducted a study with seven users without impairment and seven users with balance impairments due to Multiple Sclerosis (MS). We investigated how a static reference frame (SRF) (e.g., a cross-hair always rendered in the same position on the user's display screen) impacts the participants' balances in VR. Results indicate that a SRF significantly improves balance in VR for users with MS. Based on these results, we propose guidelines for designing more accessible VEs for persons with balance impairments.",
"fno": "07504764",
"keywords": [
"Visualization",
"Games",
"Atmospheric Measurements",
"Particle Measurements",
"Virtual Environments",
"Multiple Sclerosis",
"Head Mounted Display",
"Virtual Reality",
"Balance",
"Accessibility"
],
"authors": [
{
"affiliation": "University of Texas at San Antonio",
"fullName": "Sharif Mohammad Shahnewaz Ferdous",
"givenName": "Sharif Mohammad Shahnewaz",
"surname": "Ferdous",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at San Antonio",
"fullName": "Imtiaz Muhammad Arafat",
"givenName": "Imtiaz Muhammad",
"surname": "Arafat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at San Antonio",
"fullName": "John Quarles",
"givenName": "John",
"surname": "Quarles",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "283-284",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504763",
"articleId": "12OmNy5zsmT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504765",
"articleId": "12OmNzwpU30",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2015/7334/0/7334a428",
"title": "Effects of Somatosensory Video Games on Simple Reactions of Institutional-Dwelling Older Adults with Mild-Cognitive Impairments",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a428/12OmNB8Cj2K",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2016/3834/0/3834a282",
"title": "Dynamic and Static Balance in Persons with Different Arch Height and Impacts of an Arch Support",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2016/3834a282/12OmNCwUmAP",
"parentPublication": {
"id": "proceedings/bibe/2016/3834/0",
"title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2017/5812/0/08056608",
"title": "Integrating fall-risk assessments within a simple balance exergame",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056608/12OmNvlg8h7",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460041",
"title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460041/12OmNwF0BS2",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802059",
"title": "A unique way to increase presence of mobility impaired users — Increasing confidence in balance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802059/12OmNxGAL3n",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892356",
"title": "Improve accessibility of virtual and augmented reality for people with balance impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892356/12OmNxYL5bz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859538",
"title": "Towards understanding the capability of spatial audio feedback in virtual environments for people with visual impairments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859538/12OmNyL0TJW",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446488",
"title": "Investigating the Reason for Increased Postural Instability in Virtual Reality for Persons with Balance Impairments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446488/13bd1gJ1v0N",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a782",
"title": "Auditory Feedback for Standing Balance Improvement in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a782/1CJc6WO5JBu",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08758372",
"title": "Evaluating Balance Recovery Techniques for Users Wearing Head-Mounted Display in VR",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08758372/1bwCk2J4N7q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0JubbA6A",
"doi": "10.1109/VR.2019.8797736",
"title": "Emotion Recognition in Gamers Wearing Head-mounted Display",
"normalizedTitle": "Emotion Recognition in Gamers Wearing Head-mounted Display",
"abstract": "Wearing head-mounted display (HMD) makes previous research regarding emotion recognition using machine vision ineffective since they utilized entire face images for training. In this paper, we trained the convolutional neural networks (CNNs) which are capable of estimating the emotions from the images of a face wearing a HMD by hiding eyes and eyebrows from existing face-emotion dataset. Our analysis based on the class activation maps show that it is capable of classifying emotions without the eyes and the eyebrows which are to serve useful information in recognizing emotions. This implies the possibility of estimating the emotions from the images of humans wearing HMDs using machine vision.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wearing head-mounted display (HMD) makes previous research regarding emotion recognition using machine vision ineffective since they utilized entire face images for training. In this paper, we trained the convolutional neural networks (CNNs) which are capable of estimating the emotions from the images of a face wearing a HMD by hiding eyes and eyebrows from existing face-emotion dataset. Our analysis based on the class activation maps show that it is capable of classifying emotions without the eyes and the eyebrows which are to serve useful information in recognizing emotions. This implies the possibility of estimating the emotions from the images of humans wearing HMDs using machine vision.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wearing head-mounted display (HMD) makes previous research regarding emotion recognition using machine vision ineffective since they utilized entire face images for training. In this paper, we trained the convolutional neural networks (CNNs) which are capable of estimating the emotions from the images of a face wearing a HMD by hiding eyes and eyebrows from existing face-emotion dataset. Our analysis based on the class activation maps show that it is capable of classifying emotions without the eyes and the eyebrows which are to serve useful information in recognizing emotions. This implies the possibility of estimating the emotions from the images of humans wearing HMDs using machine vision.",
"fno": "08797736",
"keywords": [
"Computer Vision",
"Convolutional Neural Nets",
"Emotion Recognition",
"Face Recognition",
"Feature Extraction",
"Helmet Mounted Displays",
"Wearable Computers",
"Emotion Recognition",
"Wearing Head Mounted Display",
"Machine Vision",
"Convolutional Neural Networks",
"HMD",
"Eyebrows",
"Class Activation Maps",
"Face Images",
"Face Emotion Dataset",
"CNNs",
"Face",
"Emotion Recognition",
"Resists",
"Feature Extraction",
"Eyebrows",
"Training",
"Cams",
"Emotion Recognition",
"Deep Neural Network"
],
"authors": [
{
"affiliation": "Yonsei University, School of Mechanical Engineering",
"fullName": "Hwanmoo Yong",
"givenName": "Hwanmoo",
"surname": "Yong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University, School of Mechanical Engineering",
"fullName": "Jisuk Lee",
"givenName": "Jisuk",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Motion Device Inc.",
"fullName": "Jongeun Choi",
"givenName": "Jongeun",
"surname": "Choi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1251-1252",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798294",
"articleId": "1cJ0TNjjJp6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798355",
"articleId": "1cJ0MbZf2Fi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2010/8420/0/05720342",
"title": "Eyes and Eyebrows Detection for Performance Driven Animation",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720342/12OmNvoWV3q",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2002/1781/0/17810149",
"title": "Diminishing Head-Mounted Display for Shared Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810149/12OmNy4r3Zp",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945716",
"title": "New distances combination for facial expression recognition from image sequences",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945716/12OmNyrIarE",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbr-lars/2012/4906/0/4906a251",
"title": "Imitation of Facial Expressions for a Virtual Robotic Head",
"doi": null,
"abstractUrl": "/proceedings-article/sbr-lars/2012/4906a251/12OmNz5apIu",
"parentPublication": {
"id": "proceedings/sbr-lars/2012/4906/0",
"title": "Brazilian Robotics Symposium and Latin American Robotics Symposium (SBR-LARS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466438",
"title": "Facial Expression Recognition Using Adaptive Neuro-fuzzy Inference Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466438/13Jkr9zNRVo",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2021/0021/0/09666368",
"title": "SUGO-MIMI: A Waggle Ear-Type Device Linked to Eyebrows",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2021/09666368/1A3hNuppDLW",
"parentPublication": {
"id": "proceedings/aciiw/2021/0021/0",
"title": "2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a105",
"title": "Real-Time Recognition of In-Place Body Actions and Head Gestures using Only a Head-Mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a105/1MNgCnmbXyM",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797925",
"title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797925/1cJ0J09XMdy",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09146716",
"title": "Volumetric Head-Mounted Display With Locally Adaptive Focal Blocks",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09146716/1lHjPSqVrpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy5hRda",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"acronym": "3dpvt",
"groupId": "1000000",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBLdKJ3",
"doi": "10.1109/3DPVT.2006.7",
"title": "3D Skeleton-Based Body Pose Recovery",
"normalizedTitle": "3D Skeleton-Based Body Pose Recovery",
"abstract": "This paper presents an approach to recover body motions from multiple views using a 3D skeletal model. It takes, as input, foreground silhouette sequences from multiple viewpoints, and computes, for each frame, the skeleton pose which best fit the body pose. Skeletal models encode mostly motion information and allows therefore to separate motion estimation from shape estimation for which solutions exist; And focusing on motion parameters significantly reduces the dependency on specific body shapes, yielding thus more flexible solutions for body motion capture. However, a problem generally faced with skeletal models is to find adequate measurements with which to fit the model. In this paper, we propose to use the medial axis of the body shape to this purpose. Such medial axis can be estimated from the visual hull, a shape approximation which is easily obtained from the silhouette information. Experiments show that this approach is robust to several perturbations in the model or in the input data, and also allows fast body motions or, equivalently, important motions between consecutive frames.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an approach to recover body motions from multiple views using a 3D skeletal model. It takes, as input, foreground silhouette sequences from multiple viewpoints, and computes, for each frame, the skeleton pose which best fit the body pose. Skeletal models encode mostly motion information and allows therefore to separate motion estimation from shape estimation for which solutions exist; And focusing on motion parameters significantly reduces the dependency on specific body shapes, yielding thus more flexible solutions for body motion capture. However, a problem generally faced with skeletal models is to find adequate measurements with which to fit the model. In this paper, we propose to use the medial axis of the body shape to this purpose. Such medial axis can be estimated from the visual hull, a shape approximation which is easily obtained from the silhouette information. Experiments show that this approach is robust to several perturbations in the model or in the input data, and also allows fast body motions or, equivalently, important motions between consecutive frames.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an approach to recover body motions from multiple views using a 3D skeletal model. It takes, as input, foreground silhouette sequences from multiple viewpoints, and computes, for each frame, the skeleton pose which best fit the body pose. Skeletal models encode mostly motion information and allows therefore to separate motion estimation from shape estimation for which solutions exist; And focusing on motion parameters significantly reduces the dependency on specific body shapes, yielding thus more flexible solutions for body motion capture. However, a problem generally faced with skeletal models is to find adequate measurements with which to fit the model. In this paper, we propose to use the medial axis of the body shape to this purpose. Such medial axis can be estimated from the visual hull, a shape approximation which is easily obtained from the silhouette information. Experiments show that this approach is robust to several perturbations in the model or in the input data, and also allows fast body motions or, equivalently, important motions between consecutive frames.",
"fno": "282500389",
"keywords": [],
"authors": [
{
"affiliation": "GRAVIR-INRIA Rhone-Alpes, France",
"fullName": "Clement Menier",
"givenName": "Clement",
"surname": "Menier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "GRAVIR-INRIA Rhone-Alpes, France",
"fullName": "Edmond Boyer",
"givenName": "Edmond",
"surname": "Boyer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "GRAVIR-INRIA Rhone-Alpes, France",
"fullName": "Bruno Raffin",
"givenName": "Bruno",
"surname": "Raffin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dpvt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-06-01T00:00:00",
"pubType": "proceedings",
"pages": "389-396",
"year": "2006",
"issn": null,
"isbn": "0-7695-2825-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04155751",
"articleId": "12OmNARRYtV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "282500397",
"articleId": "12OmNCxtyM3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2003/2105/9/210590102",
"title": "Enforcing Constraints for Human Body Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2003/210590102/12OmNCcKQL9",
"parentPublication": {
"id": "proceedings/cvprw/2003/2105/9",
"title": "2003 Conference on Computer Vision and Pattern Recognition Workshop - Volume 9",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2003/1950/2/195021071",
"title": "Constraining Human Body Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2003/195021071/12OmNqGRGbP",
"parentPublication": {
"id": "proceedings/iccv/2003/1950/2",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587795",
"title": "View-invariant recognition of body pose from space-time templates",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587795/12OmNqHqSmg",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457595",
"title": "3D Body-part tracking of two persons using a hierarchical body model",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457595/12OmNrY3LDY",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2009/3791/0/3791a102",
"title": "Cyclic Animation of Human Body Using PDE Surfaces and Maya",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2009/3791a102/12OmNwdtwgQ",
"parentPublication": {
"id": "proceedings/cw/2009/3791/0",
"title": "2009 International Conference on CyberWorlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2001/1272/1/127210821",
"title": "Estimating 3D Body Pose using Uncalibrated Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2001/127210821/12OmNz61dyO",
"parentPublication": {
"id": "proceedings/cvpr/2001/1272/1",
"title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05540153",
"title": "Multisensor-fusion for 3D full-body human motion capture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05540153/12OmNzVoBPU",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/04/08669824",
"title": "Survey on Style in 3D Human Body Motion: Taxonomy, Data, Recognition and Its Applications",
"doi": null,
"abstractUrl": "/journal/ta/2021/04/08669824/18wIZlDXxy8",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1323",
"title": "Learning Motion Priors for 4D Human Body Capture in 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1323/1BmLs4NuZAQ",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900d389",
"title": "Skeletor: Skeletal Transformers for Robust Body-Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900d389/1yJYua1hTXi",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNB836KR",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"acronym": "ism",
"groupId": "1001094",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC1Y5qd",
"doi": "10.1109/ISM.2016.0042",
"title": "Adaptive Pooling of the Most Relevant Spatio-Temporal Features for Action Recognition",
"normalizedTitle": "Adaptive Pooling of the Most Relevant Spatio-Temporal Features for Action Recognition",
"abstract": "This paper presents a model-based action recognition system that utilizes the Kinect 3D skeleton to construct adaptive spatio-temporal motion representations. The proposed method utilizes two features, namely the joint relative distance (JRD) and joint relative angle (JRA) to encode the spatio-temporal motion patterns of different skeletal joints. To evaluate the relevance of a particular joint-pair in representing an action class, we introduce a flatness measure that quantifies the level of engagement of the corresponding joint-pair in performing the action. The flatness measures computed for all skeletal joint-pairs are accumulated to construct a joint-pair relevance (JPR) matrix, which facilitates adaptive pooling of the most relevant spatio-temporal features to construct the final motion description for individual action classes. In addition, we propose a score level fusion of JRD and JRA features with a weighted dynamic time warping (DTW)-based matching scheme to effectively boost the overall recognition performance. In our experiments, the proposed method achieves better recognition performance than well-known existing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a model-based action recognition system that utilizes the Kinect 3D skeleton to construct adaptive spatio-temporal motion representations. The proposed method utilizes two features, namely the joint relative distance (JRD) and joint relative angle (JRA) to encode the spatio-temporal motion patterns of different skeletal joints. To evaluate the relevance of a particular joint-pair in representing an action class, we introduce a flatness measure that quantifies the level of engagement of the corresponding joint-pair in performing the action. The flatness measures computed for all skeletal joint-pairs are accumulated to construct a joint-pair relevance (JPR) matrix, which facilitates adaptive pooling of the most relevant spatio-temporal features to construct the final motion description for individual action classes. In addition, we propose a score level fusion of JRD and JRA features with a weighted dynamic time warping (DTW)-based matching scheme to effectively boost the overall recognition performance. In our experiments, the proposed method achieves better recognition performance than well-known existing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a model-based action recognition system that utilizes the Kinect 3D skeleton to construct adaptive spatio-temporal motion representations. The proposed method utilizes two features, namely the joint relative distance (JRD) and joint relative angle (JRA) to encode the spatio-temporal motion patterns of different skeletal joints. To evaluate the relevance of a particular joint-pair in representing an action class, we introduce a flatness measure that quantifies the level of engagement of the corresponding joint-pair in performing the action. The flatness measures computed for all skeletal joint-pairs are accumulated to construct a joint-pair relevance (JPR) matrix, which facilitates adaptive pooling of the most relevant spatio-temporal features to construct the final motion description for individual action classes. In addition, we propose a score level fusion of JRD and JRA features with a weighted dynamic time warping (DTW)-based matching scheme to effectively boost the overall recognition performance. In our experiments, the proposed method achieves better recognition performance than well-known existing methods.",
"fno": "4571a177",
"keywords": [
"Training",
"Three Dimensional Displays",
"Skeleton",
"Testing",
"Indexes",
"Motion Measurement",
"Dynamics",
"Score Fusion",
"Action Recognition",
"Kinect Skeleton",
"Joint Relevance",
"Motion Representation",
"Dynamic Time Warping"
],
"authors": [
{
"affiliation": null,
"fullName": "Faisal Ahmed",
"givenName": "Faisal",
"surname": "Ahmed",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Padma Polash Paul",
"givenName": "Padma Polash",
"surname": "Paul",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marina Gavrilova",
"givenName": "Marina",
"surname": "Gavrilova",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ism",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "177-180",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-4571-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4571a173",
"articleId": "12OmNvk7JML",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4571a181",
"articleId": "12OmNAle6uF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bigcomp/2015/7303/0/07072828",
"title": "Substitutive skeleton fusion for human action recognition",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2015/07072828/12OmNAIdBR3",
"parentPublication": {
"id": "proceedings/bigcomp/2015/7303/0",
"title": "2015 International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239231",
"title": "Sequence of the Most Informative Joints (SMIJ): A new representation for human skeletal action recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239231/12OmNBKW9BF",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926608",
"title": "Real-Time Online Action Detection Forests Using Spatio-Temporal Contexts",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926608/12OmNqyUUt1",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a445",
"title": "Spatio-Temporal Naive-Bayes Nearest-Neighbor (ST-NBNN) for Skeleton-Based Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a445/12OmNxvwp1x",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284738",
"title": "Human Action Recognition Using 2-D Spatio-Temporal Templates",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284738/12OmNyoSbaC",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2022/0745/0/074500a195",
"title": "Action recognition based on parallel convolutional recurrent neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2022/074500a195/1EVijA7T008",
"parentPublication": {
"id": "proceedings/cbd/2022/0745/0",
"title": "2021 Ninth International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859595",
"title": "Skeletal Twins: Unsupervised Skeleton-Based Action Representation Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859595/1G9DMR0cmd2",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/5555/01/09915478",
"title": "View Invariant Spatio-Temporal Descriptor for action recognition from Skeleton Sequences",
"doi": null,
"abstractUrl": "/journal/ai/5555/01/09915478/1Hmgqug2qNa",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d319",
"title": "STAR-Transformer: A Spatio-temporal Cross Attention Transformer for Human Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d319/1L8qpvQuULe",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093361",
"title": "Stacked Spatio-Temporal Graph Convolutional Networks for Action Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093361/1jPbuxiCzUk",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyNQSGO",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvmG82h",
"doi": "10.1109/CVPR.2007.383235",
"title": "Segmenting Motions of Different Types by Unsupervised Manifold Clustering",
"normalizedTitle": "Segmenting Motions of Different Types by Unsupervised Manifold Clustering",
"abstract": "We propose a novel algorithm for segmenting multiple motions of different types from point correspondences in multiple affine or perspective views. Since point trajectories associated with different motions live in different manifolds, traditional approaches deal with only one manifold type: linear subspaces for affine views, and homographic, bilinear and trilinear varieties for two and three perspective views. As real motion sequences contain motions of different types, we cast motion segmentation as a problem of clustering manifolds of different types. Rather than explicitly modeling each manifold as a linear, bilinear or multilinear variety, we use nonlinear dimensionality reduction to learn a low-dimensional representation of the union of all manifolds. We show that for a union of separated manifolds, the LLE algorithm computes a matrix whose null space contains vectors giving the segmentation of the data. An analysis of the variance of these vectors allows us to distinguish them from other vectors in the null space. This leads to a new algorithm for clustering both linear and non-linear manifolds. Although this algorithm is theoretically designed for separated manifolds, our experiments demonstrate its performance on real data where this assumption does not hold. We test our algorithm on the Hopkins 155 motion segmentation database and achieve an average classification error of 4.8%, which compares favorably against state-of-the art multiframe motion segmentation methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel algorithm for segmenting multiple motions of different types from point correspondences in multiple affine or perspective views. Since point trajectories associated with different motions live in different manifolds, traditional approaches deal with only one manifold type: linear subspaces for affine views, and homographic, bilinear and trilinear varieties for two and three perspective views. As real motion sequences contain motions of different types, we cast motion segmentation as a problem of clustering manifolds of different types. Rather than explicitly modeling each manifold as a linear, bilinear or multilinear variety, we use nonlinear dimensionality reduction to learn a low-dimensional representation of the union of all manifolds. We show that for a union of separated manifolds, the LLE algorithm computes a matrix whose null space contains vectors giving the segmentation of the data. An analysis of the variance of these vectors allows us to distinguish them from other vectors in the null space. This leads to a new algorithm for clustering both linear and non-linear manifolds. Although this algorithm is theoretically designed for separated manifolds, our experiments demonstrate its performance on real data where this assumption does not hold. We test our algorithm on the Hopkins 155 motion segmentation database and achieve an average classification error of 4.8%, which compares favorably against state-of-the art multiframe motion segmentation methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel algorithm for segmenting multiple motions of different types from point correspondences in multiple affine or perspective views. Since point trajectories associated with different motions live in different manifolds, traditional approaches deal with only one manifold type: linear subspaces for affine views, and homographic, bilinear and trilinear varieties for two and three perspective views. As real motion sequences contain motions of different types, we cast motion segmentation as a problem of clustering manifolds of different types. Rather than explicitly modeling each manifold as a linear, bilinear or multilinear variety, we use nonlinear dimensionality reduction to learn a low-dimensional representation of the union of all manifolds. We show that for a union of separated manifolds, the LLE algorithm computes a matrix whose null space contains vectors giving the segmentation of the data. An analysis of the variance of these vectors allows us to distinguish them from other vectors in the null space. This leads to a new algorithm for clustering both linear and non-linear manifolds. Although this algorithm is theoretically designed for separated manifolds, our experiments demonstrate its performance on real data where this assumption does not hold. We test our algorithm on the Hopkins 155 motion segmentation database and achieve an average classification error of 4.8%, which compares favorably against state-of-the art multiframe motion segmentation methods.",
"fno": "04270260",
"keywords": [],
"authors": [
{
"affiliation": "Center for Imaging Science, Johns Hopkins University, Baltimore MD 21218, USA",
"fullName": "Alvina Goh",
"givenName": "Alvina",
"surname": "Goh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Imaging Science, Johns Hopkins University, Baltimore MD 21218, USA",
"fullName": "Rene Vidal",
"givenName": "Rene",
"surname": "Vidal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2007",
"issn": null,
"isbn": "1-4244-1179-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04270259",
"articleId": "12OmNyUFfTM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04270261",
"articleId": "12OmNBEYzQI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995703",
"title": "On analyzing video with very small motions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995703/12OmNAkniUH",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270115",
"title": "Projective Factorization of Multiple Rigid-Body Motions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270115/12OmNrFkeOF",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2006/2597/1/259711168",
"title": "Nonlinear Mean Shift for Clustering over Analytic Manifolds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2006/259711168/12OmNyQ7FWy",
"parentPublication": {
"id": "proceedings/cvpr/2006/2597/2",
"title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06836051",
"title": "Unsupervised iterative manifold alignment via local feature histograms",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06836051/12OmNyv7m3a",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1994/5825/0/00323921",
"title": "Detecting multiple image motions by exploiting temporal coherence of apparent motion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1994/00323921/12OmNzUgd0v",
"parentPublication": {
"id": "proceedings/cvpr/1994/5825/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1999/0164/1/01640469",
"title": "Critical Motions and Ambiguous Euclidean Reconstructions in Auto-Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1999/01640469/12OmNzUgd1Q",
"parentPublication": {
"id": "proceedings/iccv/1999/0164/1",
"title": "Proceedings of the Seventh IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvm/1991/2153/0/00212809",
"title": "Factorization-based segmentation of motions",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1991/00212809/12OmNzVGcCu",
"parentPublication": {
"id": "proceedings/wvm/1991/2153/0",
"title": "Proceedings of the IEEE Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08809754",
"title": "Spatio-Temporal Manifold Learning for Human Motions via Long-Horizon Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08809754/1cHEztXuDmM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b266",
"title": "Learning to Segment Rigid Motions from Two Frames",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b266/1yeMkVZimPK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyNQSG3",
"title": "Computer Science and Information Engineering, World Congress on",
"acronym": "csie",
"groupId": "1002821",
"volume": "4",
"displayVolume": "4",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyKrHcE",
"doi": "10.1109/CSIE.2009.504",
"title": "Shot-Change Detections and Shot-Change Effects Recognition based on Motions and Their Estimation Reliabilities",
"normalizedTitle": "Shot-Change Detections and Shot-Change Effects Recognition based on Motions and Their Estimation Reliabilities",
"abstract": "There are many shot-change detection methods. They work well in videos without large motions and fast camera works. This paper proposes more robust method for detecting shot changes and recognizing their shot-change effects. The method uses the motion between frames and their estimation reliabilities. With motion estimation, we can have the correspondence between frames. But we have false estimation when there is a shot-change between frames. The proposed method estimates the reliabilities of the motion estimation. With the reliabilities, the proposed method estimates the probabilities of correspondence between frames. This reliability and the estimated motions lead the estimation of existence of shot-changes and the type of shot-change effects.Fist, this paper proposes the shot-change detection method based on the motions and their estimation reliabilities. Then we show the experiments on simulated videos and real videos. And last, we conclude our work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "There are many shot-change detection methods. They work well in videos without large motions and fast camera works. This paper proposes more robust method for detecting shot changes and recognizing their shot-change effects. The method uses the motion between frames and their estimation reliabilities. With motion estimation, we can have the correspondence between frames. But we have false estimation when there is a shot-change between frames. The proposed method estimates the reliabilities of the motion estimation. With the reliabilities, the proposed method estimates the probabilities of correspondence between frames. This reliability and the estimated motions lead the estimation of existence of shot-changes and the type of shot-change effects.Fist, this paper proposes the shot-change detection method based on the motions and their estimation reliabilities. Then we show the experiments on simulated videos and real videos. And last, we conclude our work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "There are many shot-change detection methods. They work well in videos without large motions and fast camera works. This paper proposes more robust method for detecting shot changes and recognizing their shot-change effects. The method uses the motion between frames and their estimation reliabilities. With motion estimation, we can have the correspondence between frames. But we have false estimation when there is a shot-change between frames. The proposed method estimates the reliabilities of the motion estimation. With the reliabilities, the proposed method estimates the probabilities of correspondence between frames. This reliability and the estimated motions lead the estimation of existence of shot-changes and the type of shot-change effects.Fist, this paper proposes the shot-change detection method based on the motions and their estimation reliabilities. Then we show the experiments on simulated videos and real videos. And last, we conclude our work.",
"fno": "3507d194",
"keywords": [
"Shot Change Detection",
"Space Time Image",
"Motion Estimation"
],
"authors": [
{
"affiliation": null,
"fullName": "Kyota Aoki",
"givenName": "Kyota",
"surname": "Aoki",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "194-198",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3507-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3507d187",
"articleId": "12OmNx6g6me",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3507d199",
"articleId": "12OmNxA3YS9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icee/2010/3997/0/3997b647",
"title": "A Shot Boundary Detection Method Based on Color Space",
"doi": null,
"abstractUrl": "/proceedings-article/icee/2010/3997b647/12OmNAXPydF",
"parentPublication": {
"id": "proceedings/icee/2010/3997/0",
"title": "International Conference on E-Business and E-Government",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aici/2009/3816/1/3816a011",
"title": "Algorithm of Shot Detection Based on SVM with Modified Kernel Function",
"doi": null,
"abstractUrl": "/proceedings-article/aici/2009/3816a011/12OmNB0X8vD",
"parentPublication": {
"id": "proceedings/aici/2009/3816/1",
"title": "2009 International Conference on Artificial Intelligence and Computational Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iw-mmdbms/1996/7469/0/74690101",
"title": "Efficient Shot Change Detection on Compressed Video Data",
"doi": null,
"abstractUrl": "/proceedings-article/iw-mmdbms/1996/74690101/12OmNCdTeLK",
"parentPublication": {
"id": "proceedings/iw-mmdbms/1996/7469/0",
"title": "Multimedia Database Management Systems, International Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2008/3278/0/3278a204",
"title": "Detection of H.264 Shot Change Using Intra Predicted Direction",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278a204/12OmNqJq4gR",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/2/3494b604",
"title": "A Novel Shot Edge Detection Algorithm based on Chi-square Histogram and Macro-block Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b604/12OmNvTk06A",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/6/3507f388",
"title": "MPEG-7 Feature Based Shot Change Detection for Scenery Video",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507f388/12OmNwDj0YD",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031e630",
"title": "An Abrupt Shot Change Detection Algorithm Based on the YUV Space",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031e630/12OmNxuo0gL",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2015/9721/0/9721a295",
"title": "A Gradual Shot Change Detection Using Combination of Luminance and Motion Features for Frame Rate Up Conversion",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2015/9721a295/12OmNyFCvT1",
"parentPublication": {
"id": "proceedings/sitis/2015/9721/0",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv-motion/2005/2271/2/227120235",
"title": "Object Level Frame Comparison for Video Shot Detection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv-motion/2005/227120235/12OmNyKrHpQ",
"parentPublication": {
"id": "proceedings/wacv-motion/2005/2271/2",
"title": "Applications of Computer Vision and the IEEE Workshop on Motion and Video Computing, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539827",
"title": "An Efficient Graphical Shot Verifier Incorporating Visual Rhythm",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539827/12OmNzUxOeY",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvk7JRv",
"title": "2015 8th International Conference on Signal Processing, Image Processing and Pattern Recognition (SIP)",
"acronym": "sip",
"groupId": "1805847",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyo1nLK",
"doi": "10.1109/SIP.2015.8",
"title": "Classification of Dance Motions with Depth Cameras Using Subsequence Dynamic Time Warping",
"normalizedTitle": "Classification of Dance Motions with Depth Cameras Using Subsequence Dynamic Time Warping",
"abstract": "This paper proposes a method for classifying 3D dance motions especially selected from Korean POP (K-POP) dance performance, which is a key technique for the dance coaching contents and choreography retrieval system. Compared to actions addressed in daily life and existing games, K-POP dance motions are much more dynamic and vary substantially according to the performers. To cope with the variation of the amplitude of pose, we present a practical pose descriptor based on relative rotations between two body joints in the spherical coordinate system. As a method to measure similarity between two incomplete motion sequences, subsequence Dynamic Time Warping (DTW) algorithm is explored that supports partial matches. For the tests, 200 popular dance segments are gathered from 100 K-POP songs by utilizing the Kinect for Windows v2 sensor of Microsoft. The experimental results show that our representation and matching method can achieve an excellent performance in the classification of complex dance motions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a method for classifying 3D dance motions especially selected from Korean POP (K-POP) dance performance, which is a key technique for the dance coaching contents and choreography retrieval system. Compared to actions addressed in daily life and existing games, K-POP dance motions are much more dynamic and vary substantially according to the performers. To cope with the variation of the amplitude of pose, we present a practical pose descriptor based on relative rotations between two body joints in the spherical coordinate system. As a method to measure similarity between two incomplete motion sequences, subsequence Dynamic Time Warping (DTW) algorithm is explored that supports partial matches. For the tests, 200 popular dance segments are gathered from 100 K-POP songs by utilizing the Kinect for Windows v2 sensor of Microsoft. The experimental results show that our representation and matching method can achieve an excellent performance in the classification of complex dance motions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a method for classifying 3D dance motions especially selected from Korean POP (K-POP) dance performance, which is a key technique for the dance coaching contents and choreography retrieval system. Compared to actions addressed in daily life and existing games, K-POP dance motions are much more dynamic and vary substantially according to the performers. To cope with the variation of the amplitude of pose, we present a practical pose descriptor based on relative rotations between two body joints in the spherical coordinate system. As a method to measure similarity between two incomplete motion sequences, subsequence Dynamic Time Warping (DTW) algorithm is explored that supports partial matches. For the tests, 200 popular dance segments are gathered from 100 K-POP songs by utilizing the Kinect for Windows v2 sensor of Microsoft. The experimental results show that our representation and matching method can achieve an excellent performance in the classification of complex dance motions.",
"fno": "9855a005",
"keywords": [
"Dynamics",
"Skeleton",
"Heuristic Algorithms",
"Classification Algorithms",
"Three Dimensional Displays",
"Motion Segmentation",
"Feature Extraction"
],
"authors": [
{
"affiliation": null,
"fullName": "Dohyung Kim",
"givenName": "Dohyung",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Minsu Jang",
"givenName": "Minsu",
"surname": "Jang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Youngwoo Yoon",
"givenName": "Youngwoo",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jaehong Kim",
"givenName": "Jaehong",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "5-8",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9855-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9855a001",
"articleId": "12OmNxj23jC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9855a009",
"articleId": "12OmNx5Yvne",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2010/4109/0/4109b537",
"title": "Recognizing Dance Motions with Segmental SVD",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109b537/12OmNBSSVfp",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitcs/2016/3765/0/07740331",
"title": "Example-Based Retrieval System for Human Motion Data",
"doi": null,
"abstractUrl": "/proceedings-article/icitcs/2016/07740331/12OmNC8dgcL",
"parentPublication": {
"id": "proceedings/icitcs/2016/3765/0",
"title": "2016 6th International Conference on IT Convergence and Security (ICITCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2009/3791/0/3791a171",
"title": "Automatic Composition for Contemporary Dance Using 3D Motion Clips: Experiment on Dance Training and System Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2009/3791a171/12OmNwEJ0HF",
"parentPublication": {
"id": "proceedings/cw/2009/3791/0",
"title": "2009 International Conference on CyberWorlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/3/169530676",
"title": "Recovering Structures and Motions from Mutual Projection of Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169530676/12OmNywfKz2",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2006/2503/0/25030481",
"title": "Dance Posture Recognition Using Wide-baseline Orthogonal Stereo Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2006/25030481/12OmNzWOBf5",
"parentPublication": {
"id": "proceedings/fg/2006/2503/0",
"title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030501",
"title": "Example-Based Automatic Music-Driven Conventional Dance Motion Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030501/13rRUwwaKt6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0/751800a868",
"title": "Dance Posture/Steps Classification Using 3D Joints from the Kinect Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2018/751800a868/17D45Xh13wO",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0",
"title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09745335",
"title": "Rhythm is a Dancer: Music-Driven Motion Synthesis with Global Structure",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09745335/1CagHUR61pe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08809754",
"title": "Spatio-Temporal Manifold Learning for Human Motions via Long-Horizon Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08809754/1cHEztXuDmM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-&-icivpr/2020/9331/0/09306581",
"title": "Performance Evaluation of Markerless 3D Skeleton Pose Estimates with Pop Dance Motion Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306581/1qcicQKjJ6g",
"parentPublication": {
"id": "proceedings/iciev-&-icivpr/2020/9331/0",
"title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1A6Bmtfk8WA",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1A6BsHKK7Xa",
"doi": "10.1109/FG52635.2021.9666934",
"title": "Lightweight Deep Symmetric Positive Definite Manifold Network for Real-Time 3D Hand Gesture Recognition",
"normalizedTitle": "Lightweight Deep Symmetric Positive Definite Manifold Network for Real-Time 3D Hand Gesture Recognition",
"abstract": "This paper proposes a new neural network based on Symmetric Positive Definite (SPD) manifold learning for real-time skeleton-based hand gesture recognition. The transformation of the input skeletal data into SPD matrices allows to encode efficiently high-order statistics such as covariances or correlations between the joints' features. These matrices are combined and transformed by our deep neural network which is thus constrained to work on the manifold of such matrices. The online recognition is performed using two sliding windows moving along the gesture's stream in order to simultaneously detect and classify the occurrence of a new gesture within the stream. The proposed network is validated on a challenging dataset and shows state-of-the-art performances both in terms of accuracy and inference time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a new neural network based on Symmetric Positive Definite (SPD) manifold learning for real-time skeleton-based hand gesture recognition. The transformation of the input skeletal data into SPD matrices allows to encode efficiently high-order statistics such as covariances or correlations between the joints' features. These matrices are combined and transformed by our deep neural network which is thus constrained to work on the manifold of such matrices. The online recognition is performed using two sliding windows moving along the gesture's stream in order to simultaneously detect and classify the occurrence of a new gesture within the stream. The proposed network is validated on a challenging dataset and shows state-of-the-art performances both in terms of accuracy and inference time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a new neural network based on Symmetric Positive Definite (SPD) manifold learning for real-time skeleton-based hand gesture recognition. The transformation of the input skeletal data into SPD matrices allows to encode efficiently high-order statistics such as covariances or correlations between the joints' features. These matrices are combined and transformed by our deep neural network which is thus constrained to work on the manifold of such matrices. The online recognition is performed using two sliding windows moving along the gesture's stream in order to simultaneously detect and classify the occurrence of a new gesture within the stream. The proposed network is validated on a challenging dataset and shows state-of-the-art performances both in terms of accuracy and inference time.",
"fno": "09666934",
"keywords": [
"Deep Learning Artificial Intelligence",
"Gesture Recognition",
"Higher Order Statistics",
"Image Classification",
"Medical Computing",
"Medical Image Processing",
"Palmprint Recognition",
"Hand Gesture Recognition",
"SPD Matrices",
"High Order Statistics",
"Deep Neural Network",
"Online Recognition",
"Skeletal Data",
"Symmetric Positive Definite Manifold Network",
"Sliding Windows",
"Manifolds",
"Symmetric Matrices",
"Three Dimensional Displays",
"Pipelines",
"Gesture Recognition",
"Real Time Systems",
"Skeleton"
],
"authors": [
{
"affiliation": "UNICAEN, ENSICAEN, CNRS, GREYC,Caen,France,14000",
"fullName": "Mostefa Ben Naceur",
"givenName": "Mostefa Ben",
"surname": "Naceur",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UNICAEN, ENSICAEN, CNRS, GREYC,Caen,France,14000",
"fullName": "Luc Brun",
"givenName": "Luc",
"surname": "Brun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UNICAEN, ENSICAEN, CNRS, GREYC,Caen,France,14000",
"fullName": "Olivier Lézoray",
"givenName": "Olivier",
"surname": "Lézoray",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3176-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09667073",
"articleId": "1A6BKCWVZ3W",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09666948",
"articleId": "1A6BxEU8Rpu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851f157",
"title": "Kernel Sparse Subspace Clustering on Symmetric Positive Definite Manifolds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f157/12OmNBlFQUa",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2017/4662/0/08388651",
"title": "Sparse representation based classification with intra-class variation dictionary on symmetric positive definite manifolds",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2017/08388651/12OmNqJHFte",
"parentPublication": {
"id": "proceedings/isspit/2017/4662/0",
"title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989a073",
"title": "Kernel Methods on the Riemannian Manifold of Symmetric Positive Definite Matrices",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989a073/12OmNviZlBK",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06836085",
"title": "Random projections on manifolds of Symmetric Positive Definite matrices for image classification",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06836085/12OmNvmowR5",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477621",
"title": "Image set classification by symmetric positive semi-definite matrices",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477621/12OmNx5Yv4o",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/08237720",
"title": "Learning Discriminative αβ-Divergences for Positive Definite Matrices",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/08237720/12OmNzmLxEW",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/03/ttp2014030592",
"title": "Tensor Sparse Coding for Positive Definite Matrices",
"doi": null,
"abstractUrl": "/journal/tp/2014/03/ttp2014030592/13rRUEgarCx",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486518",
"title": "Support Vector Metric Learning on Symmetric Positive Definite Manifold",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486518/14jQfOuD6nK",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2019/0089/0/08756512",
"title": "Skeleton-Based Hand Gesture Recognition by Learning SPD Matrices with Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2019/08756512/1bzYuG4rrRm",
"parentPublication": {
"id": "proceedings/fg/2019/0089/0",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102821",
"title": "Kernel Clustering On Symmetric Positive Definite Manifolds Via Double Approximated Low Rank Representation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102821/1kwr0ndN8oE",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G9DtzCwrjW",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G9E21CBr44",
"doi": "10.1109/ICME52920.2022.9859776",
"title": "Unpaired Motion Style Transfer with Motion-Oriented Projection Flow Network",
"normalizedTitle": "Unpaired Motion Style Transfer with Motion-Oriented Projection Flow Network",
"abstract": "Existing motion style transfer methods trained with unpaired samples tend to generate motions with inconsistent content or inconsistent number of frames when compared with the source motion. Moreover, due to the limited training samples, these methods perform worse in unseen style. In this paper, we propose a novel unpaired motion style transfer framework that generates complete stylized motions with consistent content. We introduce a motion-oriented projection flow network (M-PFN) designed for temporal motion data, which encodes the content and style motions into latent codes and decodes the stylized features produced by adaptive instance normalization (AdaIN) into stylized motions. The M-PFN contains dedicated operations and modules, e.g., Transformer, to process the temporal information of motions, which help to improve the continuity of the generated motions. Comparisons with the state-of-the-art methods show that our method effectively transfers the style of the motions while retaining the complete content and has stronger generalization ability in unseen style features.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing motion style transfer methods trained with unpaired samples tend to generate motions with inconsistent content or inconsistent number of frames when compared with the source motion. Moreover, due to the limited training samples, these methods perform worse in unseen style. In this paper, we propose a novel unpaired motion style transfer framework that generates complete stylized motions with consistent content. We introduce a motion-oriented projection flow network (M-PFN) designed for temporal motion data, which encodes the content and style motions into latent codes and decodes the stylized features produced by adaptive instance normalization (AdaIN) into stylized motions. The M-PFN contains dedicated operations and modules, e.g., Transformer, to process the temporal information of motions, which help to improve the continuity of the generated motions. Comparisons with the state-of-the-art methods show that our method effectively transfers the style of the motions while retaining the complete content and has stronger generalization ability in unseen style features.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing motion style transfer methods trained with unpaired samples tend to generate motions with inconsistent content or inconsistent number of frames when compared with the source motion. Moreover, due to the limited training samples, these methods perform worse in unseen style. In this paper, we propose a novel unpaired motion style transfer framework that generates complete stylized motions with consistent content. We introduce a motion-oriented projection flow network (M-PFN) designed for temporal motion data, which encodes the content and style motions into latent codes and decodes the stylized features produced by adaptive instance normalization (AdaIN) into stylized motions. The M-PFN contains dedicated operations and modules, e.g., Transformer, to process the temporal information of motions, which help to improve the continuity of the generated motions. Comparisons with the state-of-the-art methods show that our method effectively transfers the style of the motions while retaining the complete content and has stronger generalization ability in unseen style features.",
"fno": "09859776",
"keywords": [
"Image Motion Analysis",
"Image Sequences",
"Learning Artificial Intelligence",
"Video Signal Processing",
"Unpaired Motion Style Transfer",
"Motion Oriented Projection Flow Network",
"Source Motion",
"Stylized Motions",
"Temporal Motion Data",
"Adaptive Instance Normalization",
"Unpaired Motion Style",
"M PFN",
"Generalization Ability",
"Ada IN",
"Training",
"Interpolation",
"Codes",
"Adaptive Systems",
"Transformers",
"Skeleton",
"Data Models",
"Motion Generation",
"Style Transfer",
"Flow Network",
"Ada IN"
],
"authors": [
{
"affiliation": "Sun Yat-sen University",
"fullName": "Yue Huang",
"givenName": "Yue",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University",
"fullName": "Haoran Mo",
"givenName": "Haoran",
"surname": "Mo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University",
"fullName": "Xiao Liang",
"givenName": "Xiao",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University",
"fullName": "Chengying Gao",
"givenName": "Chengying",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8563-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09859591",
"articleId": "1G9Ep1BWxIQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09859923",
"articleId": "1G9EPU6VcuQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dmamh/2007/3065/0/30650129",
"title": "Style-Based Motion Editing",
"doi": null,
"abstractUrl": "/proceedings-article/dmamh/2007/30650129/12OmNx7XGZA",
"parentPublication": {
"id": "proceedings/dmamh/2007/3065/0",
"title": "Digital Media and its Application in Museum & Heritage/Digital Media and its Application in Museum & Heritage, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2010/4268/0/4268a009",
"title": "Camera Motion Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2010/4268a009/12OmNyPQ4O1",
"parentPublication": {
"id": "proceedings/cvmp/2010/4268/0",
"title": "2010 Conference on Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040042",
"title": "Fast Neural Style Transfer for Motion Data",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040042/13rRUxbCbsI",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4598",
"title": "StyleFormer: Real-time Arbitrary Style Transfer via Parametric Style Composition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4598/1BmL5FTElEI",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g583",
"title": "Style-ERD: Responsive and Coherent Online Motion Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g583/1H0NZuIQvyE",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09992151",
"title": "Personalized Audio-Driven 3D Facial Animation Via Style-Content Disentanglement",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09992151/1JevBLSiUqA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/9.346E214",
"title": "Learning Style Subspaces for Controllable Unpaired Domain Translation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/9.346E214/1La4JaMnIgE",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a066",
"title": "Saliency-Guided Image Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a066/1cJ0zw9Ceru",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/08953861",
"title": "MSCap: Multi-Style Image Captioning With Unpaired Stylized Text",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/08953861/1gyrJlYn8Ig",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3607",
"title": "Autoregressive Stylized Motion Synthesis with Generative Flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3607/1yeIFQTwlXO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxcMSdA",
"title": "2016 International Conference on Smart Grid and Electrical Automation (ICSGEA)",
"acronym": "icsgea",
"groupId": "1814444",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwpXRSZ",
"doi": "10.1109/ICSGEA.2016.22",
"title": "Research on the Aesthetic Mode of Digital Painting Based on Digital Technology",
"normalizedTitle": "Research on the Aesthetic Mode of Digital Painting Based on Digital Technology",
"abstract": "In order to take full advantage of digital technology, further the aesthetic value of digital painting, the differences between digital painting and traditional painting are discussed in this paper. This thesis made the research from presentation, preservation, dissemination and other aspects, provided differentiated research method and contrastive analysis about digital aesthetic. The results showed that digital painting cannot be researched in individually and extremely way. On the contrary, the research of digital and traditional painting should learn from each other and develop collectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to take full advantage of digital technology, further the aesthetic value of digital painting, the differences between digital painting and traditional painting are discussed in this paper. This thesis made the research from presentation, preservation, dissemination and other aspects, provided differentiated research method and contrastive analysis about digital aesthetic. The results showed that digital painting cannot be researched in individually and extremely way. On the contrary, the research of digital and traditional painting should learn from each other and develop collectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to take full advantage of digital technology, further the aesthetic value of digital painting, the differences between digital painting and traditional painting are discussed in this paper. This thesis made the research from presentation, preservation, dissemination and other aspects, provided differentiated research method and contrastive analysis about digital aesthetic. The results showed that digital painting cannot be researched in individually and extremely way. On the contrary, the research of digital and traditional painting should learn from each other and develop collectively.",
"fno": "07733858",
"keywords": [
"Painting",
"Art",
"Computers",
"Technological Innovation",
"Information Technology",
"Oils",
"Writing",
"Unity Of Opposites",
"Digital Painting",
"Digital Aesthetic"
],
"authors": [
{
"affiliation": null,
"fullName": "Wei Luo",
"givenName": "Wei",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jie Yang",
"givenName": "Jie",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yang Hua",
"givenName": "Yang",
"surname": "Hua",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icsgea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-08-01T00:00:00",
"pubType": "proceedings",
"pages": "304-307",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3578-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07733857",
"articleId": "12OmNxEBz6f",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07733859",
"articleId": "12OmNBBzoea",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2013/5001/0/06655801",
"title": "Art Making Using an Haptic Device for Interactive Digital Painting",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2013/06655801/12OmNBUAvWs",
"parentPublication": {
"id": "proceedings/svr/2013/5001/0",
"title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2017/4283/0/4283a179",
"title": "Classification and Aesthetic Evaluation of Paintings and Artworks",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2017/4283a179/12OmNrMHOnl",
"parentPublication": {
"id": "proceedings/sitis/2017/4283/0",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2011/0868/0/06004084",
"title": "Digital Photo Painting as an Artistic and Cultural Phenomenon",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2011/06004084/12OmNvqEvQA",
"parentPublication": {
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2015/9393/0/9393a817",
"title": "The Application of Ink Element in the Creation of Digital Image",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2015/9393a817/12OmNxHryik",
"parentPublication": {
"id": "proceedings/isdea/2015/9393/0",
"title": "2015 Sixth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042343",
"title": "A Modular Framework for Digital Painting",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042343/13rRUxDIthe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2018/8031/0/803100a275",
"title": "Design and Implementation of Oil Painting Online Appreciation System Based on Android",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2018/803100a275/17D45WcjjPO",
"parentPublication": {
"id": "proceedings/icvris/2018/8031/0",
"title": "2018 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fones-aiot/2021/1091/0/109100a238",
"title": "The Art of Oil Paintings Creation with Computer Multimedia Technology",
"doi": null,
"abstractUrl": "/proceedings-article/fones-aiot/2021/109100a238/1CKQYQ1Dj9K",
"parentPublication": {
"id": "proceedings/fones-aiot/2021/1091/0",
"title": "2021 International Conference on Forthcoming Networks and Sustainability in AIoT Era (FoNeS-AIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdiime/2022/9009/0/900900a065",
"title": "Research into Digital Oil Painting Restoration Algorithm Based on Image Acquisition Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icdiime/2022/900900a065/1Iz53Bt8muQ",
"parentPublication": {
"id": "proceedings/icdiime/2022/9009/0",
"title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08765801",
"title": "Vectorized Painting with Temporal Diffusion Curves",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08765801/1bLypqX0rwA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cipae/2020/8223/0/822300a057",
"title": "Exploration of Painting Creation Media and Concept Based on Visual Communication Design",
"doi": null,
"abstractUrl": "/proceedings-article/cipae/2020/822300a057/1rSRj01oltm",
"parentPublication": {
"id": "proceedings/cipae/2020/8223/0",
"title": "2020 International Conference on Computers, Information Processing and Advanced Education (CIPAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKiqs",
"title": "2018 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"acronym": "icvris",
"groupId": "1828444",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WcjjPO",
"doi": "10.1109/ICVRIS.2018.00074",
"title": "Design and Implementation of Oil Painting Online Appreciation System Based on Android",
"normalizedTitle": "Design and Implementation of Oil Painting Online Appreciation System Based on Android",
"abstract": "This paper takes online display of oil paintings as the main research objective. It studies design and application of oil painting online display system and constructs a set of mobile platform-based oil painting appreciation system to be adapted to response-front layout of mobile terminal device. We apply MVC framework of PHP in WebApp development. Front-end adopts the currently latest HTMLS technology and Bootstrap framework so that system has effective compatibility and platform crossing to provide unified experience for useless devices. The system establishes role authority control-based system and associates content to corresponding roles through detailed role distribution of content so the development of system business logic is more standardized. Finally, various functions and performance indexes in oil painting online display system are tested and the feasibility of this improved scheme is proved.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper takes online display of oil paintings as the main research objective. It studies design and application of oil painting online display system and constructs a set of mobile platform-based oil painting appreciation system to be adapted to response-front layout of mobile terminal device. We apply MVC framework of PHP in WebApp development. Front-end adopts the currently latest HTMLS technology and Bootstrap framework so that system has effective compatibility and platform crossing to provide unified experience for useless devices. The system establishes role authority control-based system and associates content to corresponding roles through detailed role distribution of content so the development of system business logic is more standardized. Finally, various functions and performance indexes in oil painting online display system are tested and the feasibility of this improved scheme is proved.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper takes online display of oil paintings as the main research objective. It studies design and application of oil painting online display system and constructs a set of mobile platform-based oil painting appreciation system to be adapted to response-front layout of mobile terminal device. We apply MVC framework of PHP in WebApp development. Front-end adopts the currently latest HTMLS technology and Bootstrap framework so that system has effective compatibility and platform crossing to provide unified experience for useless devices. The system establishes role authority control-based system and associates content to corresponding roles through detailed role distribution of content so the development of system business logic is more standardized. Finally, various functions and performance indexes in oil painting online display system are tested and the feasibility of this improved scheme is proved.",
"fno": "803100a275",
"keywords": [
"Android Operating System",
"Authorisation",
"Computer Bootstrapping",
"Hypermedia Markup Languages",
"Internet",
"Mobile Computing",
"Painting",
"Role Authority Control Based System",
"System Business Logic",
"Mobile Terminal Device",
"Oil Painting Appreciation System",
"Oil Painting Online Display System",
"Mobile Platform",
"PHP",
"Web App Development",
"HTMLS Technology",
"Bootstrap Framework",
"Painting",
"Oils",
"Art",
"Databases",
"Business",
"Servers",
"Oil Painting",
"Android",
"B S",
"Cell",
"Modules"
],
"authors": [
{
"affiliation": null,
"fullName": "Feng Lv",
"givenName": "Feng",
"surname": "Lv",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvris",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-08-01T00:00:00",
"pubType": "proceedings",
"pages": "275-278",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8031-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "803100a271",
"articleId": "17D45WK5ApA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "803100a279",
"articleId": "17D45WODaqq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iai/2004/8387/0/01300934",
"title": "Imaging and rendering of oil paintings using a multi-band camera",
"doi": null,
"abstractUrl": "/proceedings-article/iai/2004/01300934/12OmNB7tUq4",
"parentPublication": {
"id": "proceedings/iai/2004/8387/0",
"title": "2004 Southwest Symposium on Image Analysis and Interpretation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/112P1C04",
"title": "Synthesizing oil painting surface geometry from a single photograph",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/112P1C04/12OmNBBzojI",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsgea/2016/3578/0/07733858",
"title": "Research on the Aesthetic Mode of Digital Painting Based on Digital Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icsgea/2016/07733858/12OmNwpXRSZ",
"parentPublication": {
"id": "proceedings/icsgea/2016/3578/0",
"title": "2016 International Conference on Smart Grid and Electrical Automation (ICSGEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1999/5897/0/00809905",
"title": "Visualizing Multivalued Data from 2D Incompressible Flows Using concepts from painting",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1999/00809905/12OmNzwZ6y7",
"parentPublication": {
"id": "proceedings/visual/1999/5897/0",
"title": "Proceedings Visualization '99",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1999/5897/0/00809905",
"title": "Visualizing Multivalued Data from 2D Incompressible Flows Using concepts from painting",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1999/00809905/19wAEC4KbjG",
"parentPublication": {
"id": "proceedings/ieee-vis/1999/5897/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fones-aiot/2021/1091/0/109100a238",
"title": "The Art of Oil Paintings Creation with Computer Multimedia Technology",
"doi": null,
"abstractUrl": "/proceedings-article/fones-aiot/2021/109100a238/1CKQYQ1Dj9K",
"parentPublication": {
"id": "proceedings/fones-aiot/2021/1091/0",
"title": "2021 International Conference on Forthcoming Networks and Sustainability in AIoT Era (FoNeS-AIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdiime/2022/9009/0/900900a117",
"title": "Research into Oil Painting Recognition and Analysis System Based on Image Intelligent Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icdiime/2022/900900a117/1Iz51pu8F4A",
"parentPublication": {
"id": "proceedings/icdiime/2022/9009/0",
"title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdiime/2022/9009/0/900900a065",
"title": "Research into Digital Oil Painting Restoration Algorithm Based on Image Acquisition Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icdiime/2022/900900a065/1Iz53Bt8muQ",
"parentPublication": {
"id": "proceedings/icdiime/2022/9009/0",
"title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mcsoc/2022/6499/0/649900a145",
"title": "Design and implementation of vehicle oil online information monitoring system",
"doi": null,
"abstractUrl": "/proceedings-article/mcsoc/2022/649900a145/1JZ3dqgtD7q",
"parentPublication": {
"id": "proceedings/mcsoc/2022/6499/0",
"title": "2022 IEEE 15th International Symposium on Embedded Multicore/Many-core Systems-on-Chip (MCSoC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08765801",
"title": "Vectorized Painting with Temporal Diffusion Curves",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08765801/1bLypqX0rwA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwB2dXb",
"title": "2009 Third International Symposium on Intelligent Information Technology Application",
"acronym": "iita",
"groupId": "1002566",
"volume": "3",
"displayVolume": "3",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNARRYpM",
"doi": "10.1109/IITA.2009.221",
"title": "Supervised Locally Linear Embedding in Tensor Space",
"normalizedTitle": "Supervised Locally Linear Embedding in Tensor Space",
"abstract": "The paper propose a new non-linear dimensionality reduction algorithm based on locally linear embedding called supervised locally linear embedding in tensor space (SLLE/T), in which the local manifold structure within same class are preserved and the separability between different classes is enforced by maximizing distance of each point with its neighbors. To keep structure of data, we introduce tensor representation and reduce SLLE/T into the optimization problem based on HOSVD which is desirable to solve the out of sample problem. We also prove SLLE/T can be united in the graph embedding framework. The comparison experiments on face recognition indicate that SLLE/T outperform most popular dimensionality reduction algorithms both vectorization and tensor version.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The paper propose a new non-linear dimensionality reduction algorithm based on locally linear embedding called supervised locally linear embedding in tensor space (SLLE/T), in which the local manifold structure within same class are preserved and the separability between different classes is enforced by maximizing distance of each point with its neighbors. To keep structure of data, we introduce tensor representation and reduce SLLE/T into the optimization problem based on HOSVD which is desirable to solve the out of sample problem. We also prove SLLE/T can be united in the graph embedding framework. The comparison experiments on face recognition indicate that SLLE/T outperform most popular dimensionality reduction algorithms both vectorization and tensor version.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The paper propose a new non-linear dimensionality reduction algorithm based on locally linear embedding called supervised locally linear embedding in tensor space (SLLE/T), in which the local manifold structure within same class are preserved and the separability between different classes is enforced by maximizing distance of each point with its neighbors. To keep structure of data, we introduce tensor representation and reduce SLLE/T into the optimization problem based on HOSVD which is desirable to solve the out of sample problem. We also prove SLLE/T can be united in the graph embedding framework. The comparison experiments on face recognition indicate that SLLE/T outperform most popular dimensionality reduction algorithms both vectorization and tensor version.",
"fno": "3859c031",
"keywords": [
"Supervised Learning",
"Locally Linear Embedding",
"Tensor Space",
"HOSVD",
"Dimensionality Reduction"
],
"authors": [
{
"affiliation": null,
"fullName": "Chang Liu",
"givenName": "Chang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "JiLiu Zhou",
"givenName": "JiLiu",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kun He",
"givenName": "Kun",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "YanLi Zhu",
"givenName": "YanLi",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "DongFang Wang",
"givenName": "DongFang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "JianPing Xia",
"givenName": "JianPing",
"surname": "Xia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iita",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-11-01T00:00:00",
"pubType": "proceedings",
"pages": "31-34",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3859-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3859c027",
"articleId": "12OmNrJROXh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3859c035",
"articleId": "12OmNzwZ6hf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2006/2521/4/252140194",
"title": "Building Connected Neighborhood Graphs for Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252140194/12OmNBEYzLG",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2008/3359/0/3359a223",
"title": "Neighbourhood Discriminant Locally Linear Embedding in Face Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2008/3359a223/12OmNCeK2aG",
"parentPublication": {
"id": "proceedings/cgiv/2008/3359/0",
"title": "2008 Fifth International Conference on Computer Graphics, Imaging and Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a531",
"title": "Globally-Preserving Based Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a531/12OmNCm7BMN",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995563",
"title": "sLLE: Spherical locally linear embedding with applications to tomography",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995563/12OmNwGIcwJ",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2005/2319/0/23190290",
"title": "Face Recognition with Weighted Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2005/23190290/12OmNweBUPT",
"parentPublication": {
"id": "proceedings/crv/2005/2319/0",
"title": "The 2nd Canadian Conference on Computer and Robot Vision (CRV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/2/3119b039",
"title": "Multi-pose Ear Recognition Based on Improved Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119b039/12OmNyoiZck",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/2",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2011/4584/0/4584b211",
"title": "Soft-Voting Classification using Locally Linear Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2011/4584b211/12OmNywxlSD",
"parentPublication": {
"id": "proceedings/cis/2011/4584/0",
"title": "2011 Seventh International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/4/3336g394",
"title": "Local Linear Embedding in Dimensionality Reduction Based on Small World Principle",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336g394/12OmNzBOhUH",
"parentPublication": {
"id": "proceedings/csse/2008/3336/4",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/3/3962e338",
"title": "Shot Transition Detection Based on Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962e338/12OmNzb7ZrJ",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/3",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761293",
"title": "Clustering-based locally linear embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761293/12OmNzcPAdv",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvEyR7P",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "4",
"displayVolume": "4",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBEYzLG",
"doi": "10.1109/ICPR.2006.345",
"title": "Building Connected Neighborhood Graphs for Locally Linear Embedding",
"normalizedTitle": "Building Connected Neighborhood Graphs for Locally Linear Embedding",
"abstract": "Locally linear embedding is a nonlinear method for dimensionality reduction and manifold learning. It requires well-sampled input data in high dimensional space so that neighborhoods of all data points overlap with each other. In this paper, we build connected neighborhood graphs for the purpose of assigning neighbor points. A few methods are examined to build connected neighborhood graphs. They have made LLE applicable to a wide range of data including under-sampled data and non-uniformly distributed data. These methods are compared through experiments on both synthetic and real world data sets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Locally linear embedding is a nonlinear method for dimensionality reduction and manifold learning. It requires well-sampled input data in high dimensional space so that neighborhoods of all data points overlap with each other. In this paper, we build connected neighborhood graphs for the purpose of assigning neighbor points. A few methods are examined to build connected neighborhood graphs. They have made LLE applicable to a wide range of data including under-sampled data and non-uniformly distributed data. These methods are compared through experiments on both synthetic and real world data sets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Locally linear embedding is a nonlinear method for dimensionality reduction and manifold learning. It requires well-sampled input data in high dimensional space so that neighborhoods of all data points overlap with each other. In this paper, we build connected neighborhood graphs for the purpose of assigning neighbor points. A few methods are examined to build connected neighborhood graphs. They have made LLE applicable to a wide range of data including under-sampled data and non-uniformly distributed data. These methods are compared through experiments on both synthetic and real world data sets.",
"fno": "252140194",
"keywords": [
"Dimensionality Reduction",
"Locally Linear Embedding",
"Manifold Learning"
],
"authors": [
{
"affiliation": "Western Michigan University, Kalamazoo",
"fullName": "Li Yang",
"givenName": "Li",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-08-01T00:00:00",
"pubType": "proceedings",
"pages": "194-197",
"year": "2006",
"issn": "1051-4651",
"isbn": "0-7695-2521-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "252140189",
"articleId": "12OmNykTNn6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "252140198",
"articleId": "12OmNAk5HPx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iita/2009/3859/3/3859c031",
"title": "Supervised Locally Linear Embedding in Tensor Space",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2009/3859c031/12OmNARRYpM",
"parentPublication": {
"id": "proceedings/iita/2009/3859/3",
"title": "2009 Third International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a531",
"title": "Globally-Preserving Based Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a531/12OmNCm7BMN",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2008/3305/5/3305e305",
"title": "Generalized Locally Linear Embedding Based on Local Reconstruction Similarity",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2008/3305e305/12OmNqNoscX",
"parentPublication": {
"id": "proceedings/fskd/2008/3305/5",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995563",
"title": "sLLE: Spherical locally linear embedding with applications to tomography",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995563/12OmNwGIcwJ",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2009/3888/2/3888b219",
"title": "Active Neighborhood Selection for Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2009/3888b219/12OmNwLOYU8",
"parentPublication": {
"id": "proceedings/kam/2009/3888/2",
"title": "Knowledge Acquisition and Modeling, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2005/2319/0/23190290",
"title": "Face Recognition with Weighted Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2005/23190290/12OmNweBUPT",
"parentPublication": {
"id": "proceedings/crv/2005/2319/0",
"title": "The 2nd Canadian Conference on Computer and Robot Vision (CRV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/4/252140202",
"title": "Locally Multidimensional Scaling for Nonlinear Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252140202/12OmNx9WSYj",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/2/3119b039",
"title": "Multi-pose Ear Recognition Based on Improved Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119b039/12OmNyoiZck",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/2",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/05/i0827",
"title": "Building k-Connected Neighborhood Graphs for Isometric Data Embedding",
"doi": null,
"abstractUrl": "/journal/tp/2006/05/i0827/13rRUxly8Yy",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCm7BMN",
"doi": "10.1109/ICPR.2010.135",
"title": "Globally-Preserving Based Locally Linear Embedding",
"normalizedTitle": "Globally-Preserving Based Locally Linear Embedding",
"abstract": "The locally linear embedding (LLE) algorithm is considered as a powerful method for the problem of nonlinear dimensionality reduction. In this paper, a new method called globally-preserving based LLE (GPLLE) is proposed. It not only preserves the local neighborhood, but also keeps those distant samples still far away, which solves the problem that LLE may encounter, i.e. LLE only makes local neighborhood preserving, but can’t prevent the distant samples from nearing. Moreover, GPLLE can estimate the intrinsic dimensionality d of the manifold structure. The experiment results show that GPLLE always achieves better classification performances than LLE based on the estimated d.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The locally linear embedding (LLE) algorithm is considered as a powerful method for the problem of nonlinear dimensionality reduction. In this paper, a new method called globally-preserving based LLE (GPLLE) is proposed. It not only preserves the local neighborhood, but also keeps those distant samples still far away, which solves the problem that LLE may encounter, i.e. LLE only makes local neighborhood preserving, but can’t prevent the distant samples from nearing. Moreover, GPLLE can estimate the intrinsic dimensionality d of the manifold structure. The experiment results show that GPLLE always achieves better classification performances than LLE based on the estimated d.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The locally linear embedding (LLE) algorithm is considered as a powerful method for the problem of nonlinear dimensionality reduction. In this paper, a new method called globally-preserving based LLE (GPLLE) is proposed. It not only preserves the local neighborhood, but also keeps those distant samples still far away, which solves the problem that LLE may encounter, i.e. LLE only makes local neighborhood preserving, but can’t prevent the distant samples from nearing. Moreover, GPLLE can estimate the intrinsic dimensionality d of the manifold structure. The experiment results show that GPLLE always achieves better classification performances than LLE based on the estimated d.",
"fno": "4109a531",
"keywords": [
"Dimensionality Reduction",
"Manifold Learning",
"Globally Preserving",
"Locally Linear",
"Dimensionality Estimation"
],
"authors": [
{
"affiliation": null,
"fullName": "Kanghua Hui",
"givenName": "Kanghua",
"surname": "Hui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chunheng Wang",
"givenName": "Chunheng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Baihua Xiao",
"givenName": "Baihua",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "531-534",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109a527",
"articleId": "12OmNCeaQ2g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109a535",
"articleId": "12OmNyUFfXP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2006/2521/4/252140194",
"title": "Building Connected Neighborhood Graphs for Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252140194/12OmNBEYzLG",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2008/3359/0/3359a223",
"title": "Neighbourhood Discriminant Locally Linear Embedding in Face Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2008/3359a223/12OmNCeK2aG",
"parentPublication": {
"id": "proceedings/cgiv/2008/3359/0",
"title": "2008 Fifth International Conference on Computer Graphics, Imaging and Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2008/3305/5/3305e305",
"title": "Generalized Locally Linear Embedding Based on Local Reconstruction Similarity",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2008/3305e305/12OmNqNoscX",
"parentPublication": {
"id": "proceedings/fskd/2008/3305/5",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2009/3888/2/3888b219",
"title": "Active Neighborhood Selection for Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2009/3888b219/12OmNwLOYU8",
"parentPublication": {
"id": "proceedings/kam/2009/3888/2",
"title": "Knowledge Acquisition and Modeling, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2005/2319/0/23190290",
"title": "Face Recognition with Weighted Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2005/23190290/12OmNweBUPT",
"parentPublication": {
"id": "proceedings/crv/2005/2319/0",
"title": "The 2nd Canadian Conference on Computer and Robot Vision (CRV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/2/3118b502",
"title": "Speech Visualization based on Locally Linear Embedding (LLE) for the Hearing Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118b502/12OmNwpXRXs",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/2",
"title": "BioMedical Engineering and Informatics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2008/3311/2/3311b152",
"title": "Facial Pose and Expression Analysis Based on Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2008/3311b152/12OmNxxdZGa",
"parentPublication": {
"id": "proceedings/iscid/2008/3311/2",
"title": "2008 International Symposium on Computational Intelligence and Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/2/3119b039",
"title": "Multi-pose Ear Recognition Based on Improved Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119b039/12OmNyoiZck",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/3",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmsp/2011/4356/2/4356b245",
"title": "Compressed Locally Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cmsp/2011/4356b245/12OmNz61doK",
"parentPublication": {
"id": "proceedings/cmsp/2011/4356/2",
"title": "Multimedia and Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2018/0169/0/016900a378",
"title": "An Improved Weighted Local Linear Embedding Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2018/016900a378/17D45WgziSI",
"parentPublication": {
"id": "proceedings/cis/2018/0169/0",
"title": "2018 14th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCbU3aP",
"title": "2009 WRI Global Congress on Intelligent Systems",
"acronym": "gcis",
"groupId": "1002842",
"volume": "2",
"displayVolume": "2",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqFrGrQ",
"doi": "10.1109/GCIS.2009.8",
"title": "A New Method for Linear Dimensionality Reduction",
"normalizedTitle": "A New Method for Linear Dimensionality Reduction",
"abstract": "A novel class based linear dimensionality reduction method is proposed, called Class-Wise Correlation Preserving Projection (CWCPP). In CWCPP, the relation among the original gene expression data is preserved according to a certain kind of similarity between data points, which takes special consideration of both the correlation information and the class information. Different from the traditional method, i.e., Fisher Linear Discriminant Analysis (FLD), CWCPP utilizes correlation information to guide the procedure of linear projection directions searching. Experiments on yeast gene expression data and NCI gene expression data are performed to test and evaluate the proposed algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A novel class based linear dimensionality reduction method is proposed, called Class-Wise Correlation Preserving Projection (CWCPP). In CWCPP, the relation among the original gene expression data is preserved according to a certain kind of similarity between data points, which takes special consideration of both the correlation information and the class information. Different from the traditional method, i.e., Fisher Linear Discriminant Analysis (FLD), CWCPP utilizes correlation information to guide the procedure of linear projection directions searching. Experiments on yeast gene expression data and NCI gene expression data are performed to test and evaluate the proposed algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A novel class based linear dimensionality reduction method is proposed, called Class-Wise Correlation Preserving Projection (CWCPP). In CWCPP, the relation among the original gene expression data is preserved according to a certain kind of similarity between data points, which takes special consideration of both the correlation information and the class information. Different from the traditional method, i.e., Fisher Linear Discriminant Analysis (FLD), CWCPP utilizes correlation information to guide the procedure of linear projection directions searching. Experiments on yeast gene expression data and NCI gene expression data are performed to test and evaluate the proposed algorithm.",
"fno": "3571b509",
"keywords": [
"Linear Dimensionality Reduction Fisher Linear Discriminant Analysis Class Wise Correlation Preserving Projection"
],
"authors": [
{
"affiliation": null,
"fullName": "Wenjun Wang",
"givenName": "Wenjun",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Junying Zhang",
"givenName": "Junying",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jin Xu",
"givenName": "Jin",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yue Wang",
"givenName": "Yue",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "gcis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-05-01T00:00:00",
"pubType": "proceedings",
"pages": "509-513",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3571-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3571b503",
"articleId": "12OmNzkMlMq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3571b518",
"articleId": "12OmNz5JBS7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibmw/2009/5121/0/05332127",
"title": "Lasso based gene selection for linear classifiers",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2009/05332127/12OmNBNM96L",
"parentPublication": {
"id": "proceedings/bibmw/2009/5121/0",
"title": "2009 IEEE International Conference on Bioinformatics and Biomedicine Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2011/4596/0/4596a865",
"title": "Transferable Discriminative Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2011/4596a865/12OmNy3iFuF",
"parentPublication": {
"id": "proceedings/ictai/2011/4596/0",
"title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/2/3119b753",
"title": "Fractional Supervised Orthogonal Local Linear Projection",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119b753/12OmNy7h3cQ",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/3",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/04/ttp2011040858",
"title": "Revisiting Linear Discriminant Techniques in Gender Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2011/04/ttp2011040858/13rRUwIF6mp",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2019/04/08369159",
"title": "A New Formulation of Linear Discriminant Analysis for Robust Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tk/2019/04/08369159/13rRUxASuvN",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/04/v0459",
"title": "Robust Linear Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2004/04/v0459/13rRUxBJhFl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2000/06/i0623",
"title": "Fractional-Step Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tp/2000/06/i0623/13rRUxNmPET",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2001/07/i0762",
"title": "Multiclass Linear Dimension Reduction by Weighted Pairwise Fisher Criteria",
"doi": null,
"abstractUrl": "/journal/tp/2001/07/i0762/13rRUxjQypX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2004/06/i0732",
"title": "Linear Dimensionality Reduction via a Heteroscedastic Extension of LDA: The Chernoff Criterion",
"doi": null,
"abstractUrl": "/journal/tp/2004/06/i0732/13rRUygT7a9",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2019/1867/0/08983403",
"title": "Y-SPCR: A new dimensionality reduction method for gene expression data classification",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2019/08983403/1hgurcXH1Ti",
"parentPublication": {
"id": "proceedings/bibm/2019/1867/0",
"title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrAdssJ",
"title": "The 2nd Canadian Conference on Computer and Robot Vision (CRV'05)",
"acronym": "crv",
"groupId": "1001794",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNweBUPT",
"doi": "10.1109/CRV.2005.42",
"title": "Face Recognition with Weighted Locally Linear Embedding",
"normalizedTitle": "Face Recognition with Weighted Locally Linear Embedding",
"abstract": "We present an approach to recognizing faces with varying appearances which also considers the relative probability of occurrence for each appearance. We propose and demonstrate extending dimensionality reduction using locally linear embedding (LLE), to model the local shape of the manifold using neighboring nodes of the graph, where the probability associated with each node is also considered. The approach has been implemented in software and evaluated on the Yale database of face images. Recognition rates are compared with non-weighted LLE and principal component analysis (PCA), and in our setting, weighted LLE achieves superior performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an approach to recognizing faces with varying appearances which also considers the relative probability of occurrence for each appearance. We propose and demonstrate extending dimensionality reduction using locally linear embedding (LLE), to model the local shape of the manifold using neighboring nodes of the graph, where the probability associated with each node is also considered. The approach has been implemented in software and evaluated on the Yale database of face images. Recognition rates are compared with non-weighted LLE and principal component analysis (PCA), and in our setting, weighted LLE achieves superior performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an approach to recognizing faces with varying appearances which also considers the relative probability of occurrence for each appearance. We propose and demonstrate extending dimensionality reduction using locally linear embedding (LLE), to model the local shape of the manifold using neighboring nodes of the graph, where the probability associated with each node is also considered. The approach has been implemented in software and evaluated on the Yale database of face images. Recognition rates are compared with non-weighted LLE and principal component analysis (PCA), and in our setting, weighted LLE achieves superior performance.",
"fno": "23190290",
"keywords": [
"Face Recognition",
"Nonlinear Dimensionality Reduction",
"Locally Linear Embedding"
],
"authors": [
{
"affiliation": "York University, Toronto, Ontario, Canada",
"fullName": "Nathan Mekuz",
"givenName": "Nathan",
"surname": "Mekuz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University, Toronto, Ontario, Canada",
"fullName": "Christian Bauckhage",
"givenName": "Christian",
"surname": "Bauckhage",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University, Toronto, Ontario, Canada",
"fullName": "John K. Tsotsos",
"givenName": "John K.",
"surname": "Tsotsos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "crv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-05-01T00:00:00",
"pubType": "proceedings",
"pages": "290-296",
"year": "2005",
"issn": null,
"isbn": "0-7695-2319-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "23190282",
"articleId": "12OmNBgQFMa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "23190298",
"articleId": "12OmNx965Bi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iita/2009/3859/3/3859c031",
"title": "Supervised Locally Linear Embedding in Tensor Space",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2009/3859c031/12OmNARRYpM",
"parentPublication": {
"id": "proceedings/iita/2009/3859/3",
"title": "2009 Third International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/4/252140194",
"title": "Building Connected Neighborhood Graphs for Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252140194/12OmNBEYzLG",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2008/3359/0/3359a223",
"title": "Neighbourhood Discriminant Locally Linear Embedding in Face Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2008/3359a223/12OmNCeK2aG",
"parentPublication": {
"id": "proceedings/cgiv/2008/3359/0",
"title": "2008 Fifth International Conference on Computer Graphics, Imaging and Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a531",
"title": "Globally-Preserving Based Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a531/12OmNCm7BMN",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2008/3305/5/3305e305",
"title": "Generalized Locally Linear Embedding Based on Local Reconstruction Similarity",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2008/3305e305/12OmNqNoscX",
"parentPublication": {
"id": "proceedings/fskd/2008/3305/5",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/2/3118b502",
"title": "Speech Visualization based on Locally Linear Embedding (LLE) for the Hearing Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118b502/12OmNwpXRXs",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/2",
"title": "BioMedical Engineering and Informatics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2008/3311/2/3311b152",
"title": "Facial Pose and Expression Analysis Based on Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2008/3311b152/12OmNxxdZGa",
"parentPublication": {
"id": "proceedings/iscid/2008/3311/2",
"title": "2008 International Symposium on Computational Intelligence and Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/2/3119b039",
"title": "Multi-pose Ear Recognition Based on Improved Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119b039/12OmNyoiZck",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/3",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/3/3962e338",
"title": "Shot Transition Detection Based on Locally Linear Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962e338/12OmNzb7ZrJ",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/3",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2018/0169/0/016900a378",
"title": "An Improved Weighted Local Linear Embedding Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2018/016900a378/17D45WgziSI",
"parentPublication": {
"id": "proceedings/cis/2018/0169/0",
"title": "2018 14th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1LKxaPjKQo0",
"title": "2022 11th International Conference on Information Communication and Applications (ICICA)",
"acronym": "icica",
"groupId": "10070855",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1LKxb9MicGk",
"doi": "10.1109/ICICA56942.2022.00011",
"title": "A Novel Dimension Reduction Framework Based on UMAP for Improving the Timeliness of Small Samples",
"normalizedTitle": "A Novel Dimension Reduction Framework Based on UMAP for Improving the Timeliness of Small Samples",
"abstract": "UMAP (Uniform Manifold Approximation and Projection) is a fantastic non-linear dimension reduction method, having the capability of quickly processing large datasets. However, it is challenging to balance the timeliness and accuracy when reducing the dimension of the datasets with small samples and noise. To further enhance its timeliness, we propose a novel dimension reduction framework based on UMAP by introducing information entropy and LRR (Low-Rank Representation). We firstly perform LRR on the small sample dataset to remove noise. Besides, we innovatively calculate the entropy threshold with the entropy weight of each data feature to select valuable features. Finally, the dimension of the dataset with valuable features is reduced by UMAP. The datasets generated by us and several UCI datasets are employed to verify that the proposed framework is feasible and effective.",
"abstracts": [
{
"abstractType": "Regular",
"content": "UMAP (Uniform Manifold Approximation and Projection) is a fantastic non-linear dimension reduction method, having the capability of quickly processing large datasets. However, it is challenging to balance the timeliness and accuracy when reducing the dimension of the datasets with small samples and noise. To further enhance its timeliness, we propose a novel dimension reduction framework based on UMAP by introducing information entropy and LRR (Low-Rank Representation). We firstly perform LRR on the small sample dataset to remove noise. Besides, we innovatively calculate the entropy threshold with the entropy weight of each data feature to select valuable features. Finally, the dimension of the dataset with valuable features is reduced by UMAP. The datasets generated by us and several UCI datasets are employed to verify that the proposed framework is feasible and effective.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "UMAP (Uniform Manifold Approximation and Projection) is a fantastic non-linear dimension reduction method, having the capability of quickly processing large datasets. However, it is challenging to balance the timeliness and accuracy when reducing the dimension of the datasets with small samples and noise. To further enhance its timeliness, we propose a novel dimension reduction framework based on UMAP by introducing information entropy and LRR (Low-Rank Representation). We firstly perform LRR on the small sample dataset to remove noise. Besides, we innovatively calculate the entropy threshold with the entropy weight of each data feature to select valuable features. Finally, the dimension of the dataset with valuable features is reduced by UMAP. The datasets generated by us and several UCI datasets are employed to verify that the proposed framework is feasible and effective.",
"fno": "901100a022",
"keywords": [
"Data Preparation",
"Entropy",
"Feature Extraction",
"Pattern Classification",
"Information Entropy",
"Low Rank Representation",
"LRR",
"Nonlinear Dimension Reduction Method",
"Novel Dimension Reduction Framework",
"Sample Dataset",
"UCI Datasets",
"UMAP",
"Uniform Manifold Approximation And Projection",
"Valuable Features",
"Dimensionality Reduction",
"Manifolds",
"Entropy",
"Information Entropy",
"Dimension Reduction",
"UMAP",
"Information Entropy",
"LRR"
],
"authors": [
{
"affiliation": "College of Intelligent Systems Science and Engineering, Harbin Engineering University,Harbin,China",
"fullName": "Nannan Dong",
"givenName": "Nannan",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligent Systems Science and Engineering, Harbin Engineering University,Harbin,China",
"fullName": "Jianhua Cheng",
"givenName": "Jianhua",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligent Systems Science and Engineering, Harbin Engineering University,Harbin,China",
"fullName": "Jiazheng Lv",
"givenName": "Jiazheng",
"surname": "Lv",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Systems Engineering, Academy of Military Science,Beijing,China",
"fullName": "Xudong Zhong",
"givenName": "Xudong",
"surname": "Zhong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icica",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "22-28",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9011-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "901100a018",
"articleId": "1LKxb1dLUzK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "901100a029",
"articleId": "1LKxcsVROlW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2011/4408/0/4408a725",
"title": "Diverse Dimension Decomposition of an Itemset Space",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2011/4408a725/12OmNAOKnQo",
"parentPublication": {
"id": "proceedings/icdm/2011/4408/0",
"title": "2011 IEEE 11th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciic/2010/4152/0/4152a006",
"title": "Comparing Dimension Reduction Techniques for Arabic Text Classification Using BPNN Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iciic/2010/4152a006/12OmNCcKQyf",
"parentPublication": {
"id": "proceedings/iciic/2010/4152/0",
"title": "Integrated Intelligent Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci-cc/2011/1695/0/06016143",
"title": "Image retrieval based on intrinsic dimension and Shannon entropy",
"doi": null,
"abstractUrl": "/proceedings-article/icci-cc/2011/06016143/12OmNqFrGye",
"parentPublication": {
"id": "proceedings/icci-cc/2011/1695/0",
"title": "2011 10th IEEE International Conference on Cognitive Informatics & Cognitive Computing (ICCI-CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a075",
"title": "Parametric Dimension Reduction by Preserving Local Structure",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a075/1J6henXuhws",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995272",
"title": "The Performance of UMAP plus Linkage Compared with Daura-Clustering of Molecular Dynamics of the PD-1 Checkpoint Receptor",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995272/1JC34WQEmL6",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceee/2019/3910/0/391000a359",
"title": "Dimension Reduction and Its Effects in Hyperspectral Data Classification",
"doi": null,
"abstractUrl": "/proceedings-article/iceee/2019/391000a359/1cpqFQDlfGg",
"parentPublication": {
"id": "proceedings/iceee/2019/3910/0",
"title": "2019 6th International Conference on Electrical and Electronics Engineering (ICEEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2020/9638/0/963800a392",
"title": "Research on PCA Data Dimension Reduction Algorithm Based on Entropy Weight Method",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2020/963800a392/1rxhB3Eau88",
"parentPublication": {
"id": "proceedings/mlbdbi/2020/9638/0",
"title": "2020 2nd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413180",
"title": "Detecting Rare Cell Populations in Flow Cytometry Data Using UMAP",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413180/1tmhj2mshy0",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412261",
"title": "Improved Time-Series Clustering with UMAP dimension reduction method",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412261/1tmjjqNsISc",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900c115",
"title": "Compact and Effective Representations for Sketch-based Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900c115/1yXsSBBkcOA",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAkEU4e",
"title": "Proceedings Geometric Modeling and Processing 2000. Theory and Applications",
"acronym": "gmap",
"groupId": "1000306",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBJw9Px",
"doi": "10.1109/GMAP.2000.838252",
"title": "Robust watermarking of polygonal meshes",
"normalizedTitle": "Robust watermarking of polygonal meshes",
"abstract": "This paper presents two variations of a robust watermarking method for general polygonal meshes of arbitrary topology which can be used for copyright protection, tamper proofing or content annotation purposes. The proposed watermark is immune to translation, rotation, scaling or affine transformation of the mesh and is hard to detect unless the exact encoding parameters are disclosed. Several examples demonstrate the effectiveness of the algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents two variations of a robust watermarking method for general polygonal meshes of arbitrary topology which can be used for copyright protection, tamper proofing or content annotation purposes. The proposed watermark is immune to translation, rotation, scaling or affine transformation of the mesh and is hard to detect unless the exact encoding parameters are disclosed. Several examples demonstrate the effectiveness of the algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents two variations of a robust watermarking method for general polygonal meshes of arbitrary topology which can be used for copyright protection, tamper proofing or content annotation purposes. The proposed watermark is immune to translation, rotation, scaling or affine transformation of the mesh and is hard to detect unless the exact encoding parameters are disclosed. Several examples demonstrate the effectiveness of the algorithm.",
"fno": "00838252",
"keywords": [
"Robustness",
"Watermarking",
"Copyright Protection",
"Decoding",
"Electrical Capacitance Tomography",
"Encoding",
"Degradation",
"Computer Science",
"Reactive Power",
"Topology"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci. & Eng., Arizona State Univ., Tempe, AZ, USA",
"fullName": "M.G. Wagner",
"givenName": "M.G.",
"surname": "Wagner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "gmap",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-01-01T00:00:00",
"pubType": "proceedings",
"pages": "201,202,203,204,205,206,207,208",
"year": "2000",
"issn": null,
"isbn": "0-7695-0562-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00838251",
"articleId": "12OmNyen1mg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00838253",
"articleId": "12OmNqMPfQJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccsee/2012/4647/2/4647b077",
"title": "Study of Digital Video Watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647b077/12OmNBVIUy8",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/1/3119a504",
"title": "Digital Image Watermarking with Blind Detection for Copyright Verification",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119a504/12OmNBVIUzR",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/1",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539001",
"title": "Digital Image Watermarking: An Overview",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539001/12OmNqzLHQl",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsacw/2012/4758/0/4758a433",
"title": "Robust XML Watermarking Using Fuzzy Queries",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2012/4758a433/12OmNrAdsH2",
"parentPublication": {
"id": "proceedings/compsacw/2012/4758/0",
"title": "2012 IEEE 36th Annual Computer Software and Applications Conference Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mines/2009/3843/2/3843b159",
"title": "Blind Audio Watermarking Scheme for Copyright Protection of Multi-user",
"doi": null,
"abstractUrl": "/proceedings-article/mines/2009/3843b159/12OmNrkT7vl",
"parentPublication": {
"id": "proceedings/mines/2009/3843/2",
"title": "Multimedia Information Networking and Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc/2009/3737/0/3737a342",
"title": "An Image Copyright Protection Scheme with Tamper Detection Capability",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc/2009/3737a342/12OmNviHKni",
"parentPublication": {
"id": "proceedings/uic-atc/2009/3737/0",
"title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icimt/2009/3922/0/3922a230",
"title": "A Review of Digital Watermarking Techniques for Text Documents",
"doi": null,
"abstractUrl": "/proceedings-article/icimt/2009/3922a230/12OmNx965GR",
"parentPublication": {
"id": "proceedings/icimt/2009/3922/0",
"title": "Information and Multimedia Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmp/2000/0562/0/05620201",
"title": "Robust Watermarking of Polygonal Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2000/05620201/12OmNypIYGu",
"parentPublication": {
"id": "proceedings/gmp/2000/0562/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2006/2616/1/26160253",
"title": "A Robust Geometry-Based Watermarking Scheme for 3D Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2006/26160253/12OmNzcPA35",
"parentPublication": {
"id": "proceedings/icicic/2006/2616/1",
"title": "Innovative Computing ,Information and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase/2018/4388/0/438801a086",
"title": "A Privacy-Preserving Multipurpose Watermarking Scheme for Audio Authentication and Protection",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase/2018/438801a086/17D45X2fUHq",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase/2018/4388/0",
"title": "2018 17th IEEE International Conference On Trust, Security And Privacy In Computing And Communications/ 12th IEEE International Conference On Big Data Science And Engineering (TrustCom/BigDataSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxQOjzD",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx6xHp7",
"doi": "10.1109/VISUAL.1997.663909",
"title": "Controlled simplification of genus for polygonal models",
"normalizedTitle": "Controlled simplification of genus for polygonal models",
"abstract": "Genus-reducing simplifications are important in constructing multiresolution hierarchies for level-of-detail-based rendering, especially for datasets that have several relatively small holes, tunnels, and cavities. We present a genus-reducing simplification approach that is complementary to the existing work on genus-preserving simplifications. We propose a simplification framework in which genus-reducing and genus-preserving simplifications alternate to yield much better multiresolution hierarchies than would have been possible by using either one of them. In our approach we first identify the holes and the concavities by extending the concept of /spl alpha/-hulls to polygonal meshes under the L/sub /spl infin// distance metric and then generate valid triangulations to fill them.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Genus-reducing simplifications are important in constructing multiresolution hierarchies for level-of-detail-based rendering, especially for datasets that have several relatively small holes, tunnels, and cavities. We present a genus-reducing simplification approach that is complementary to the existing work on genus-preserving simplifications. We propose a simplification framework in which genus-reducing and genus-preserving simplifications alternate to yield much better multiresolution hierarchies than would have been possible by using either one of them. In our approach we first identify the holes and the concavities by extending the concept of /spl alpha/-hulls to polygonal meshes under the L/sub /spl infin// distance metric and then generate valid triangulations to fill them.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Genus-reducing simplifications are important in constructing multiresolution hierarchies for level-of-detail-based rendering, especially for datasets that have several relatively small holes, tunnels, and cavities. We present a genus-reducing simplification approach that is complementary to the existing work on genus-preserving simplifications. We propose a simplification framework in which genus-reducing and genus-preserving simplifications alternate to yield much better multiresolution hierarchies than would have been possible by using either one of them. In our approach we first identify the holes and the concavities by extending the concept of /spl alpha/-hulls to polygonal meshes under the L/sub /spl infin// distance metric and then generate valid triangulations to fill them.",
"fno": "82620403",
"keywords": [
"Rendering Computer Graphics Controlled Simplification Polygonal Models Genus Reducing Simplifications Multiresolution Hierarchies Level Of Detail Based Rendering Datasets Small Holes Tunnels Cavities Genus Preserving Simplifications Simplification Framework Spl Alpha Hulls Polygonal Meshes L Sub Spl Infin Distance Metric Triangulations Computational Geometry Object Representations"
],
"authors": [
{
"affiliation": "State Univ. of New York, Stony Brook, NY, USA",
"fullName": "J. El-Sana",
"givenName": "J.",
"surname": "El-Sana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Univ. of New York, Stony Brook, NY, USA",
"fullName": "A. Varshney",
"givenName": "A.",
"surname": "Varshney",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-10-01T00:00:00",
"pubType": "proceedings",
"pages": "403",
"year": "1997",
"issn": "1070-2385",
"isbn": "0-8186-8262-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "82620395",
"articleId": "12OmNxFJXLB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "82620413",
"articleId": "12OmNxUdv9R",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "1KOuVybvP20",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1KOv3B65jAA",
"doi": "10.1109/FG57933.2023.10042514",
"title": "Learning Continuous Mesh Representation with Spherical Implicit Surface",
"normalizedTitle": "Learning Continuous Mesh Representation with Spherical Implicit Surface",
"abstract": "As the most common representation for 3D shapes, mesh is often stored discretely with arrays of vertices and faces. However, 3D shapes in the real world are presented continuously. In this paper, we propose to learn a continuous representation for meshes with fixed topology, a common and practical setting in many faces-, hand-, and body-related applications. First, we split the template into multiple closed manifold genus-0 meshes so that each genus-0 mesh can be parameterized onto the unit sphere. Then we learn spherical implicit surface (SIS), which takes a spherical coordinate and a global feature or a set of local features around the coordinate as inputs, predicting the vertex corresponding to the coordinate as an output. Since the spherical coordinates are continuous, SIS can depict a mesh in an arbitrary resolution. SIS representation builds a bridge between discrete and continuous representation in 3D shapes. Specifically, we train SIS networks in a self-supervised manner for two tasks: a reconstruction task and a super-resolution task. Experiments show that our SIS representation is comparable with state-of-the-art methods that are specifically designed for meshes with a fixed resolution and significantly outperforms methods that work in arbitrary resolutions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As the most common representation for 3D shapes, mesh is often stored discretely with arrays of vertices and faces. However, 3D shapes in the real world are presented continuously. In this paper, we propose to learn a continuous representation for meshes with fixed topology, a common and practical setting in many faces-, hand-, and body-related applications. First, we split the template into multiple closed manifold genus-0 meshes so that each genus-0 mesh can be parameterized onto the unit sphere. Then we learn spherical implicit surface (SIS), which takes a spherical coordinate and a global feature or a set of local features around the coordinate as inputs, predicting the vertex corresponding to the coordinate as an output. Since the spherical coordinates are continuous, SIS can depict a mesh in an arbitrary resolution. SIS representation builds a bridge between discrete and continuous representation in 3D shapes. Specifically, we train SIS networks in a self-supervised manner for two tasks: a reconstruction task and a super-resolution task. Experiments show that our SIS representation is comparable with state-of-the-art methods that are specifically designed for meshes with a fixed resolution and significantly outperforms methods that work in arbitrary resolutions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As the most common representation for 3D shapes, mesh is often stored discretely with arrays of vertices and faces. However, 3D shapes in the real world are presented continuously. In this paper, we propose to learn a continuous representation for meshes with fixed topology, a common and practical setting in many faces-, hand-, and body-related applications. First, we split the template into multiple closed manifold genus-0 meshes so that each genus-0 mesh can be parameterized onto the unit sphere. Then we learn spherical implicit surface (SIS), which takes a spherical coordinate and a global feature or a set of local features around the coordinate as inputs, predicting the vertex corresponding to the coordinate as an output. Since the spherical coordinates are continuous, SIS can depict a mesh in an arbitrary resolution. SIS representation builds a bridge between discrete and continuous representation in 3D shapes. Specifically, we train SIS networks in a self-supervised manner for two tasks: a reconstruction task and a super-resolution task. Experiments show that our SIS representation is comparable with state-of-the-art methods that are specifically designed for meshes with a fixed resolution and significantly outperforms methods that work in arbitrary resolutions.",
"fno": "10042514",
"keywords": [
"Computational Geometry",
"Computer Graphics",
"Image Resolution",
"Learning Artificial Intelligence",
"Mesh Generation",
"Arbitrary Resolution",
"Body Related Applications",
"Common Representation",
"Common Setting",
"Continuous Mesh Representation",
"Continuous Representation",
"Fixed Resolution",
"Fixed Topology",
"Genus 0 Mesh",
"Global Feature",
"Local Features",
"Multiple Closed Manifold Genus 0 Meshes",
"Practical Setting",
"SIS Networks",
"SIS Representation",
"Spherical Coordinates",
"Spherical Implicit Surface",
"Super Resolution Task",
"Bridges",
"Manifolds",
"Surface Reconstruction",
"Three Dimensional Displays",
"Shape",
"Superresolution",
"Topology"
],
"authors": [
{
"affiliation": "AI Institute, Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence,Shanghai,China",
"fullName": "Zhongpai Gao",
"givenName": "Zhongpai",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4544-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "10042711",
"articleId": "1KOuXNIuEgg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10042805",
"articleId": "1KOv5vclOWQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042491",
"title": "YMCA — Your mesh comparison application",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042491/12OmNAndiiu",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a266",
"title": "Direct Spherical Parameterization Based on Surface Curvature",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a266/12OmNCeaPV9",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a119",
"title": "The Spherical Images of Triangular Mesh Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a119/12OmNCvLY0n",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-cg/2005/2473/0/24730301",
"title": "A New Approach of Progressive Spherical Parameterization",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730301/12OmNwp74GO",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a111",
"title": "Spherical Projective Displacement Mesh",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a111/12OmNxymo5Q",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06025349",
"title": "Spherical DCB-Spline Surfaces with Hierarchical and Adaptive Knot Insertion",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06025349/13rRUB7a110",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f667",
"title": "SphereSR: <tex>Z_$360^{\\circ}$_Z</tex> Image Super-Resolution with Arbitrary Projection via Continuous Spherical Image Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f667/1H1mQNFEXEQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995036",
"title": "Using Optimal Transport to Improve Spherical Harmonic Quantification of Complex Biological Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995036/1JC2vQBMSNW",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a573",
"title": "Drawing Network Visualizations on a Continuous, Spherical Surface",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a573/1rSRcY0ThyU",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a022",
"title": "Learning Delaunay Surface Elements for Mesh Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a022/1yeLGFONHoc",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hVlL4sHcYw",
"doi": "10.1109/ICCV.2019.01006",
"title": "Deep Mesh Reconstruction From Single RGB Images via Topology Modification Networks",
"normalizedTitle": "Deep Mesh Reconstruction From Single RGB Images via Topology Modification Networks",
"abstract": "Reconstructing the 3D mesh of a general object from a single image is now possible thanks to the latest advances of deep learning technologies. However, due to the nontrivial difficulty of generating a feasible mesh structure, the state-of-the-art approaches often simplify the problem by learning the displacements of a template mesh that deforms it to the target surface. Though reconstructing a 3D shape with complex topology can be achieved by deforming multiple mesh patches, it remains difficult to stitch the results to ensure a high meshing quality. In this paper, we present an end-to-end single-view mesh reconstruction framework that is able to generate high-quality meshes with complex topologies from a single genus-0 template mesh. The key to our approach is a novel progressive shaping framework that alternates between mesh deformation and topology modification. While a deformation network predicts the per-vertex translations that reduce the gap between the reconstructed mesh and the ground truth, a novel topology modification network is employed to prune the error-prone faces, enabling the evolution of topology. By iterating over the two procedures, one can progressively modify the mesh topology while achieving higher reconstruction accuracy. Moreover, a boundary refinement network is designed to refine the boundary conditions to further improve the visual quality of the reconstructed mesh. Extensive experiments demonstrate that our approach outperforms the current state-of-the-art methods both qualitatively and quantitatively, especially for the shapes with complex topologies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Reconstructing the 3D mesh of a general object from a single image is now possible thanks to the latest advances of deep learning technologies. However, due to the nontrivial difficulty of generating a feasible mesh structure, the state-of-the-art approaches often simplify the problem by learning the displacements of a template mesh that deforms it to the target surface. Though reconstructing a 3D shape with complex topology can be achieved by deforming multiple mesh patches, it remains difficult to stitch the results to ensure a high meshing quality. In this paper, we present an end-to-end single-view mesh reconstruction framework that is able to generate high-quality meshes with complex topologies from a single genus-0 template mesh. The key to our approach is a novel progressive shaping framework that alternates between mesh deformation and topology modification. While a deformation network predicts the per-vertex translations that reduce the gap between the reconstructed mesh and the ground truth, a novel topology modification network is employed to prune the error-prone faces, enabling the evolution of topology. By iterating over the two procedures, one can progressively modify the mesh topology while achieving higher reconstruction accuracy. Moreover, a boundary refinement network is designed to refine the boundary conditions to further improve the visual quality of the reconstructed mesh. Extensive experiments demonstrate that our approach outperforms the current state-of-the-art methods both qualitatively and quantitatively, especially for the shapes with complex topologies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Reconstructing the 3D mesh of a general object from a single image is now possible thanks to the latest advances of deep learning technologies. However, due to the nontrivial difficulty of generating a feasible mesh structure, the state-of-the-art approaches often simplify the problem by learning the displacements of a template mesh that deforms it to the target surface. Though reconstructing a 3D shape with complex topology can be achieved by deforming multiple mesh patches, it remains difficult to stitch the results to ensure a high meshing quality. In this paper, we present an end-to-end single-view mesh reconstruction framework that is able to generate high-quality meshes with complex topologies from a single genus-0 template mesh. The key to our approach is a novel progressive shaping framework that alternates between mesh deformation and topology modification. While a deformation network predicts the per-vertex translations that reduce the gap between the reconstructed mesh and the ground truth, a novel topology modification network is employed to prune the error-prone faces, enabling the evolution of topology. By iterating over the two procedures, one can progressively modify the mesh topology while achieving higher reconstruction accuracy. Moreover, a boundary refinement network is designed to refine the boundary conditions to further improve the visual quality of the reconstructed mesh. Extensive experiments demonstrate that our approach outperforms the current state-of-the-art methods both qualitatively and quantitatively, especially for the shapes with complex topologies.",
"fno": "480300j963",
"keywords": [
"Image Colour Analysis",
"Image Reconstruction",
"Learning Artificial Intelligence",
"Mesh Generation",
"Neural Nets",
"Solid Modelling",
"Topology",
"Single RGB Images",
"Topology Modification Networks",
"Deep Learning Technologies",
"Mesh Structure",
"Complex Topology",
"Multiple Mesh Patches",
"High Meshing Quality",
"End To End Single View Mesh Reconstruction Framework",
"High Quality Meshes",
"Progressive Shaping Framework",
"Mesh Deformation",
"Deformation Network",
"Reconstructed Mesh",
"Topology Modification Network",
"Mesh Topology",
"Reconstruction Accuracy",
"Boundary Refinement Network",
"Single Genus 0 Template Mesh",
"Topology",
"Image Reconstruction",
"Three Dimensional Displays",
"Network Topology",
"Shape",
"Strain",
"Surface Reconstruction"
],
"authors": [
{
"affiliation": "South China University of Technology",
"fullName": "Junyi Pan",
"givenName": "Junyi",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen Research Institute of Big Data. the Chinese University of Hong Kong. Shenzhen",
"fullName": "Xiaoguang Han",
"givenName": "Xiaoguang",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Weikai Chen",
"givenName": "Weikai",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "South China University of Technology",
"fullName": "Jiapeng Tang",
"givenName": "Jiapeng",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "South China University of Technology",
"fullName": "Kui Jia",
"givenName": "Kui",
"surname": "Jia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "9963-9972",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300j953",
"articleId": "1hQqlzHGOgo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300j973",
"articleId": "1hQquzZgtBC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icess/2016/3727/0/3727a131",
"title": "A Topology Structure Repair Algorithm for Triangular Mesh Model",
"doi": null,
"abstractUrl": "/proceedings-article/icess/2016/3727a131/12OmNAlvHsm",
"parentPublication": {
"id": "proceedings/icess/2016/3727/0",
"title": "2016 13th International Conference on Embedded Software and Systems (ICESS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8551",
"title": "Neural Template: Topology-aware Reconstruction and Disentangled Generation of 3D Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8551/1H0NuMEsMaQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0813",
"title": "Topology-Preserving Shape Reconstruction and Registration via Neural Diffeomorphic Flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0813/1H1hR5pweWc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09091324",
"title": "Topology Constrained Shape Correspondence",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09091324/1jK9L6UCoAo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-&-icivpr/2020/9331/0/09306533",
"title": "Towards Detailed 3D Modeling: Mesh Super-Resolution via Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306533/1qcicuvmpcQ",
"parentPublication": {
"id": "proceedings/iciev-&-icivpr/2020/9331/0",
"title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a702",
"title": "GAMesh: Guided and Augmented Meshing for Deep Point Networks",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a702/1qyxjsT9mQE",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800b098",
"title": "Saliency Guided Subdivision for Single-View Mesh Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800b098/1qyxnB7fQvm",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09448418",
"title": "SkeletonNet: A Topology-Preserving Solution for Learning Mesh Reconstruction of Object Surfaces From RGB Images",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09448418/1ugE3VS21oY",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09632437",
"title": "STD-Net: Structure-Preserving and Topology-Adaptive Deformation Network for Single-View 3D Reconstruction",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09632437/1yYPmKqcmpq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a022",
"title": "Learning Delaunay Surface Elements for Mesh Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a022/1yeLGFONHoc",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1lPGXn8hEiI",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1lPGZdGhqIU",
"doi": "10.1109/CVPRW50498.2020.00494",
"title": "WISH: efficient 3D biological shape classification through Willmore flow and Spherical Harmonics decomposition",
"normalizedTitle": "WISH: efficient 3D biological shape classification through Willmore flow and Spherical Harmonics decomposition",
"abstract": "Shape analysis of cell nuclei, enabled by the recent advances in nano-scale digital imaging and reconstruction methods, is emerging as a very important tool to understand low-level biological processes. Current analysis techniques, however, are performed on 2D slices or assume very simple 3D shape approximations , limiting their discrimination capabilities. In this work, we introduce a compact rotation-invariant frequency-based representation of genus-0 3D shapes represented by manifold triangle meshes, that we apply to cell nuclei envelopes reconstructed from electron micrographs. The representation is robustly obtained through Spherical Harmonics coefficients over a spherical parameterization of the input mesh obtained through Willmore flow. Our results show how our method significantly improves the state-of-the-art in the classification of nuclear envelopes of rodent brain samples. Moreover, while our method is motivated by the analysis of specific biological shapes, the framework is of general use for the compact frequency encoding of any genus-0 surface.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Shape analysis of cell nuclei, enabled by the recent advances in nano-scale digital imaging and reconstruction methods, is emerging as a very important tool to understand low-level biological processes. Current analysis techniques, however, are performed on 2D slices or assume very simple 3D shape approximations , limiting their discrimination capabilities. In this work, we introduce a compact rotation-invariant frequency-based representation of genus-0 3D shapes represented by manifold triangle meshes, that we apply to cell nuclei envelopes reconstructed from electron micrographs. The representation is robustly obtained through Spherical Harmonics coefficients over a spherical parameterization of the input mesh obtained through Willmore flow. Our results show how our method significantly improves the state-of-the-art in the classification of nuclear envelopes of rodent brain samples. Moreover, while our method is motivated by the analysis of specific biological shapes, the framework is of general use for the compact frequency encoding of any genus-0 surface.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Shape analysis of cell nuclei, enabled by the recent advances in nano-scale digital imaging and reconstruction methods, is emerging as a very important tool to understand low-level biological processes. Current analysis techniques, however, are performed on 2D slices or assume very simple 3D shape approximations , limiting their discrimination capabilities. In this work, we introduce a compact rotation-invariant frequency-based representation of genus-0 3D shapes represented by manifold triangle meshes, that we apply to cell nuclei envelopes reconstructed from electron micrographs. The representation is robustly obtained through Spherical Harmonics coefficients over a spherical parameterization of the input mesh obtained through Willmore flow. Our results show how our method significantly improves the state-of-the-art in the classification of nuclear envelopes of rodent brain samples. Moreover, while our method is motivated by the analysis of specific biological shapes, the framework is of general use for the compact frequency encoding of any genus-0 surface.",
"fno": "09150932",
"keywords": [
"Brain",
"Computational Geometry",
"Computer Graphics",
"Image Classification",
"Image Morphing",
"Image Reconstruction",
"Medical Image Processing",
"Mesh Generation",
"Solid Modelling",
"WISH",
"Efficient 3 D Biological Shape Classification",
"Willmore Flow",
"Spherical Harmonics Decomposition",
"Shape Analysis",
"Reconstruction Methods",
"Low Level Biological Processes",
"Discrimination Capabilities",
"Compact Rotation Invariant Frequency",
"Genus 0 3 D Shapes",
"Manifold Triangle Meshes",
"Cell Nuclei Envelopes",
"Spherical Parameterization",
"Input Mesh",
"Specific Biological Shapes",
"Compact Frequency Encoding",
"Genus 0 Surface",
"3 D Shape Approximations",
"Shape",
"Three Dimensional Displays",
"Robustness",
"Harmonic Analysis",
"Manifolds",
"Nanobioscience"
],
"authors": [
{
"affiliation": "CSE - HBKU VIC - CRS4,Doha,Qatar Cagliari,Italy",
"fullName": "Marco Agus",
"givenName": "Marco",
"surname": "Agus",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "VIC - CRS4,Cagliari,Italy",
"fullName": "Enrico Gobbetti",
"givenName": "Enrico",
"surname": "Gobbetti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "VIC - CRS4,Cagliari,Italy",
"fullName": "Giovanni Pintore",
"givenName": "Giovanni",
"surname": "Pintore",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Istituto Di Neuroscienze Cavalieri Ottolenghi - UniTO,Torino,Italy",
"fullName": "Corrado Calì",
"givenName": "Corrado",
"surname": "Calì",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CSE - HBKU,Doha,Qatar",
"fullName": "Jens Schneider",
"givenName": "Jens",
"surname": "Schneider",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4184-4194",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9360-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09150810",
"articleId": "1lPHcyiKALu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09150812",
"articleId": "1lPHbVRO3NS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icece/2010/4031/0/4031c991",
"title": "Retrieval of 3D Models Based on Spherical Harmonics",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031c991/12OmNAObbI2",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521371",
"title": "Spherical Harmonics Descriptor for 2D-Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521371/12OmNvT2oXL",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802098",
"title": "Particle dreams in spherical harmonics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802098/12OmNvjQ8GG",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2007/3009/0/30090248",
"title": "Efficient Spherical Harmonics Representation of 3D Objects",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2007/30090248/12OmNyKJiB7",
"parentPublication": {
"id": "proceedings/pg/2007/3009/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981721",
"title": "Activity related biometric authentication using Spherical Harmonics",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981721/12OmNz2kqrM",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/06/07927729",
"title": "Advanced Hierarchical Spherical Parameterizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/06/07927729/13rRUy3xY2W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995036",
"title": "Using Optimal Transport to Improve Spherical Harmonic Quantification of Complex Biological Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995036/1JC2vQBMSNW",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042514",
"title": "Learning Continuous Mesh Representation with Spherical Implicit Surface",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042514/1KOv3B65jAA",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a047",
"title": "Effective Rotation-Invariant Point CNN with Spherical Harmonics Kernels",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a047/1ezREpXIpZC",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h819",
"title": "GLoSH: Global-Local Spherical Harmonics for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h819/1hQqy771H9u",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tmhi3ly74c",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tmiWnqm35K",
"doi": "10.1109/ICPR48806.2021.9413214",
"title": "Facetwise Mesh Refinement for Multi- View Stereo",
"normalizedTitle": "Facetwise Mesh Refinement for Multi- View Stereo",
"abstract": "Mesh refinement is a fundamental step for accurate Multi- View Stereo. It modifies the geometry of an initial manifold mesh to minimize the photometric error induced in a set of camera pairs. This initial mesh is usually the output of volumetric 3D reconstruction based on min-cut over Delaunay Triangulations. Such methods produce a significant amount of non-manifold vertices, therefore they require a vertex split step to explicitly repair them. In this paper, we extend this method to preemptively fix the non-manifold vertices by reasoning directly on the Delaunay Triangulation and avoid most vertex splits. The main contribution of this paper addresses the problem of choosing the camera pairs adopted by the refinement process. We treat the problem as a mesh labeling process, where each label corresponds to a camera pair. Differently from the state-of-the-art methods, which use each camera pair to refine all the visible parts of the mesh, we choose, for each facet, the best pair that enforces both the overall visibility and coverage. The refinement step is applied for each facet using only the camera pair selected. This facetwise refinement helps the process to be applied in the most evenly way possible.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mesh refinement is a fundamental step for accurate Multi- View Stereo. It modifies the geometry of an initial manifold mesh to minimize the photometric error induced in a set of camera pairs. This initial mesh is usually the output of volumetric 3D reconstruction based on min-cut over Delaunay Triangulations. Such methods produce a significant amount of non-manifold vertices, therefore they require a vertex split step to explicitly repair them. In this paper, we extend this method to preemptively fix the non-manifold vertices by reasoning directly on the Delaunay Triangulation and avoid most vertex splits. The main contribution of this paper addresses the problem of choosing the camera pairs adopted by the refinement process. We treat the problem as a mesh labeling process, where each label corresponds to a camera pair. Differently from the state-of-the-art methods, which use each camera pair to refine all the visible parts of the mesh, we choose, for each facet, the best pair that enforces both the overall visibility and coverage. The refinement step is applied for each facet using only the camera pair selected. This facetwise refinement helps the process to be applied in the most evenly way possible.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mesh refinement is a fundamental step for accurate Multi- View Stereo. It modifies the geometry of an initial manifold mesh to minimize the photometric error induced in a set of camera pairs. This initial mesh is usually the output of volumetric 3D reconstruction based on min-cut over Delaunay Triangulations. Such methods produce a significant amount of non-manifold vertices, therefore they require a vertex split step to explicitly repair them. In this paper, we extend this method to preemptively fix the non-manifold vertices by reasoning directly on the Delaunay Triangulation and avoid most vertex splits. The main contribution of this paper addresses the problem of choosing the camera pairs adopted by the refinement process. We treat the problem as a mesh labeling process, where each label corresponds to a camera pair. Differently from the state-of-the-art methods, which use each camera pair to refine all the visible parts of the mesh, we choose, for each facet, the best pair that enforces both the overall visibility and coverage. The refinement step is applied for each facet using only the camera pair selected. This facetwise refinement helps the process to be applied in the most evenly way possible.",
"fno": "09413214",
"keywords": [
"Computational Geometry",
"Image Reconstruction",
"Mesh Generation",
"Photometry",
"Solid Modelling",
"Stereo Image Processing",
"Mesh Labeling",
"Camera Pair",
"Facetwise Mesh Refinement",
"Multiview Stereo",
"Volumetric 3 D Reconstruction",
"Delaunay Triangulation",
"Nonmanifold Vertices",
"Vertex Splits",
"Manifold Mesh",
"Geometry",
"Photometric Error",
"Min Cut",
"Manifolds",
"Geometry",
"Three Dimensional Displays",
"Pipelines",
"Maintenance Engineering",
"Cameras",
"Cognition"
],
"authors": [
{
"affiliation": "Politecnico di Milano,Italy",
"fullName": "Andrea Romanoni",
"givenName": "Andrea",
"surname": "Romanoni",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Politecnico di Milano,Italy",
"fullName": "Matteo Matteucci",
"givenName": "Matteo",
"surname": "Matteucci",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "6794-6801",
"year": "2021",
"issn": "1051-4651",
"isbn": "978-1-7281-8808-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09412601",
"articleId": "1tmhHmy5xx6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09412411",
"articleId": "1tmjWqRqDh6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995576",
"title": "Topology-adaptive multi-view photometric stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995576/12OmNvBrgHF",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477650",
"title": "Automatic 3D reconstruction of manifold meshes via delaunay triangulation and mesh sweeping",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477650/12OmNwEJ0RT",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a706",
"title": "Multi-view Stereo with Single-View Semantic Mesh Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a706/12OmNy49sQd",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmp/2002/1674/0/16740132",
"title": "Polyhedra Operators for Mesh Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2002/16740132/12OmNzUgdiA",
"parentPublication": {
"id": "proceedings/gmp/2002/1674/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/02/09763061",
"title": "Pixel2Mesh++: 3D Mesh Generation and Refinement From Multi-View Images",
"doi": null,
"abstractUrl": "/journal/tp/2023/02/09763061/1CT4TqbFR9m",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2018/8497/0/849700a058",
"title": "Distributed Refinement of Large-Scale 3D Mesh for Accurate Multi-View Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2018/849700a058/1a3x5Nm8kAU",
"parentPublication": {
"id": "proceedings/icvrv/2018/8497/0",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/09145719",
"title": "Scalable Mesh Refinement for Canonical Polygonal Schemas of Extremely High Genus Shapes",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/09145719/1lE0g1AYf28",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c036",
"title": "Mesh-Guided Multi-View Stereo With Pyramid Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c036/1m3nZpSzuaQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a710",
"title": "Dynamic Multi-Person Mesh Recovery From Uncalibrated Multi-View Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a710/1zWEdtn5VxS",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b290",
"title": "MeshMVS: Multi-View Stereo Guided Mesh Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b290/1zWEoi7ehZS",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyPQ4vC",
"title": "Virtual Reality Conference, IEEE",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqH9her",
"doi": "10.1109/VR.2000.840490",
"title": "Eye Mark Pointer in Immersive Projection Display",
"normalizedTitle": "Eye Mark Pointer in Immersive Projection Display",
"abstract": "We developed a pointer in 3D virtual space, using an eye tracking system as a sensor. The eye mark pointer is installed to a virtual environment system, which provides stereoscopic vision with an immersive projection display.The circular polarization stereoscopic vision enables us to use the eye tracking system in the immersive projection display. The eye tracking system obtains relative gaze directions with respect to the head, so the absolute position requires compensation of the user's head motion with a head tracker. We then compare the eye mark pointer with a joystick in an experiment with the virtual environment system.The experimental result indicates the pointing of the eye mark pointer is quicker by 9.8 times than that of the joystick, and suggests that the eye mark pointer is available for pointing at the target in the virtual environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We developed a pointer in 3D virtual space, using an eye tracking system as a sensor. The eye mark pointer is installed to a virtual environment system, which provides stereoscopic vision with an immersive projection display.The circular polarization stereoscopic vision enables us to use the eye tracking system in the immersive projection display. The eye tracking system obtains relative gaze directions with respect to the head, so the absolute position requires compensation of the user's head motion with a head tracker. We then compare the eye mark pointer with a joystick in an experiment with the virtual environment system.The experimental result indicates the pointing of the eye mark pointer is quicker by 9.8 times than that of the joystick, and suggests that the eye mark pointer is available for pointing at the target in the virtual environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We developed a pointer in 3D virtual space, using an eye tracking system as a sensor. The eye mark pointer is installed to a virtual environment system, which provides stereoscopic vision with an immersive projection display.The circular polarization stereoscopic vision enables us to use the eye tracking system in the immersive projection display. The eye tracking system obtains relative gaze directions with respect to the head, so the absolute position requires compensation of the user's head motion with a head tracker. We then compare the eye mark pointer with a joystick in an experiment with the virtual environment system.The experimental result indicates the pointing of the eye mark pointer is quicker by 9.8 times than that of the joystick, and suggests that the eye mark pointer is available for pointing at the target in the virtual environment.",
"fno": "04780125",
"keywords": [
"Immersive Projection Display",
"Eye Mark Pointer",
"Gaze Direction",
"Virtual Environment"
],
"authors": [
{
"affiliation": "National Institute of Multimedia Education",
"fullName": "Kikuo Asai",
"givenName": "Kikuo",
"surname": "Asai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Multimedia Education",
"fullName": "Noritaka Osawa",
"givenName": "Noritaka",
"surname": "Osawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Multimedia Education",
"fullName": "Hideaki Takahashi",
"givenName": "Hideaki",
"surname": "Takahashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Multimedia Education",
"fullName": "Yuji Y. Sugimoto",
"givenName": "Yuji Y.",
"surname": "Sugimoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Satoshi Yamazaki",
"givenName": "Satoshi",
"surname": "Yamazaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Masahiro Samejima",
"givenName": "Masahiro",
"surname": "Samejima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Taiki Tanimae",
"givenName": "Taiki",
"surname": "Tanimae",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Solidray Co. Ltd.",
"givenName": "Solidray Co.",
"surname": "Ltd.",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-03-01T00:00:00",
"pubType": "proceedings",
"pages": "125",
"year": "2000",
"issn": "1087-8270",
"isbn": "0-7695-0478-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04780117",
"articleId": "12OmNzFMFmv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04780135",
"articleId": "12OmNxRF6UO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyugyEU",
"title": "2015 Computer Games: AI, Animation, Mobile, Multimedia, Educational and Serious Games (CGAMES)",
"acronym": "cgames",
"groupId": "1800470",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx3HI9q",
"doi": "10.1109/CGames.2015.7272954",
"title": "Where do they look at? Analysis of gaze interaction in children while playing a puzzle game",
"normalizedTitle": "Where do they look at? Analysis of gaze interaction in children while playing a puzzle game",
"abstract": "In recent years the usage of video-games in education, therapies and training, has risen sharply. The adaptation of serious games to users' needs may offer new enriching learning environments. Eye tracking sensors collect information about the location and duration of eye movements within a specific area on a computer monitor. This paper presents a usability analysis of users' eye movements while using the set of puzzle games. For this purpose a set of data collected from 63 children with an average age of 9.95 (SD = 1.21) and different attention skills is analyzed during their interaction with a set of puzzle games. Fixation data is extracted and analyzed as regards of where do they look first and most. These resources should be complemented with other interaction records, but they are interesting for creating optimized user interfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years the usage of video-games in education, therapies and training, has risen sharply. The adaptation of serious games to users' needs may offer new enriching learning environments. Eye tracking sensors collect information about the location and duration of eye movements within a specific area on a computer monitor. This paper presents a usability analysis of users' eye movements while using the set of puzzle games. For this purpose a set of data collected from 63 children with an average age of 9.95 (SD = 1.21) and different attention skills is analyzed during their interaction with a set of puzzle games. Fixation data is extracted and analyzed as regards of where do they look first and most. These resources should be complemented with other interaction records, but they are interesting for creating optimized user interfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years the usage of video-games in education, therapies and training, has risen sharply. The adaptation of serious games to users' needs may offer new enriching learning environments. Eye tracking sensors collect information about the location and duration of eye movements within a specific area on a computer monitor. This paper presents a usability analysis of users' eye movements while using the set of puzzle games. For this purpose a set of data collected from 63 children with an average age of 9.95 (SD = 1.21) and different attention skills is analyzed during their interaction with a set of puzzle games. Fixation data is extracted and analyzed as regards of where do they look first and most. These resources should be complemented with other interaction records, but they are interesting for creating optimized user interfaces.",
"fno": "07272954",
"keywords": [
"Games",
"Sensors",
"Computers",
"Gaze Tracking",
"Monitoring",
"Usability",
"Visualization"
],
"authors": [
{
"affiliation": "Deustotech Life [eVIDA], University of Deusto, Avda Universidades 24, Bilbao, Spain",
"fullName": "Maite Frutos-Pascual",
"givenName": "Maite",
"surname": "Frutos-Pascual",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Deustotech Life [eVIDA], University of Deusto, Avda Universidades 24, Bilbao, Spain",
"fullName": "Begonya Garcia-Zapirain",
"givenName": "Begonya",
"surname": "Garcia-Zapirain",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computing Information, University of Wolverhampton, WV1 1EL, United Kingdom",
"fullName": "Quasim H. Mehdi",
"givenName": "Quasim H.",
"surname": "Mehdi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgames",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-07-01T00:00:00",
"pubType": "proceedings",
"pages": "103-106",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7921-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07272953",
"articleId": "12OmNvkGW2L",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07272955",
"articleId": "12OmNC2fGt8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgames/2014/5854/0/06934137",
"title": "Guided crossword-puzzle games aimed at children with Attentional Deficit: Preliminary results",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2014/06934137/12OmNCu4nbC",
"parentPublication": {
"id": "proceedings/cgames/2014/5854/0",
"title": "2014 Computer Games: AI, Animation, Mobile, Multimedia, Educational and Serious Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926684",
"title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926684/12OmNvlxJrb",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2017/5812/0/08056614",
"title": "Serious gaze",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056614/12OmNwDACge",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000327",
"title": "Gaze tracking as a game input interface",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000327/12OmNxRWIeo",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2013/2945/0/06671261",
"title": "Kaidan: An outdoor AR puzzle adventure game",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2013/06671261/12OmNyQph1n",
"parentPublication": {
"id": "proceedings/ismar-amh/2013/2945/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926555",
"title": "Gaze Tracking and Object Recognition from Eye Images",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926555/12OmNzvz6Lc",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a594",
"title": "High-speed Gaze-oriented Projection by Cross-ratio-based Eye Tracking with Dual Infrared Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a594/1CJewqWywOk",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/emip/2022/9289/0/928900a008",
"title": "Entropy of Eye Movements While Reading Code or Text",
"doi": null,
"abstractUrl": "/proceedings-article/emip/2022/928900a008/1ED1UZAQKME",
"parentPublication": {
"id": "proceedings/emip/2022/9289/0",
"title": "2022 IEEE/ACM 10th International Workshop on Eye Movements in Programming (EMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/10066496",
"title": "GazePair: Efficient Pairing of Augmented Reality Devices Using Gaze Tracking",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/10066496/1LtR5ck4LAI",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsecs-icocsim/2021/1407/0/140700a291",
"title": "Effects of Eye Health Among Youngster While Playing Computer Game",
"doi": null,
"abstractUrl": "/proceedings-article/icsecs-icocsim/2021/140700a291/1wYlxKXHK80",
"parentPublication": {
"id": "proceedings/icsecs-icocsim/2021/1407/0",
"title": "2021 International Conference on Software Engineering & Computer Systems and 4th International Conference on Computational Science and Information Management (ICSECS-ICOCSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxE2mTD",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz2C1or",
"doi": "10.1109/ICCVW.2015.107",
"title": "Depth Compensation Model for Gaze Estimation in Sport Analysis",
"normalizedTitle": "Depth Compensation Model for Gaze Estimation in Sport Analysis",
"abstract": "A depth compensation model is presented as a novel approach to reduce the effects of parallax error for head-mounted eye trackers. The method can reduce the parallax error when the distance between the user and the target is prior known. The model is geometrically presented and its performance is tested in a totally controlled environment with aim to check the influences of eye tracker parameters and ocular biometric parameters on its behavior. We also present a gaze estimation method based on epipolar geometry for binocular eye tracking setups. The depth compensation model has shown very promising to the field of eye tracking. It can reduce 10 times less the influence of parallax error in multiple depth planes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A depth compensation model is presented as a novel approach to reduce the effects of parallax error for head-mounted eye trackers. The method can reduce the parallax error when the distance between the user and the target is prior known. The model is geometrically presented and its performance is tested in a totally controlled environment with aim to check the influences of eye tracker parameters and ocular biometric parameters on its behavior. We also present a gaze estimation method based on epipolar geometry for binocular eye tracking setups. The depth compensation model has shown very promising to the field of eye tracking. It can reduce 10 times less the influence of parallax error in multiple depth planes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A depth compensation model is presented as a novel approach to reduce the effects of parallax error for head-mounted eye trackers. The method can reduce the parallax error when the distance between the user and the target is prior known. The model is geometrically presented and its performance is tested in a totally controlled environment with aim to check the influences of eye tracker parameters and ocular biometric parameters on its behavior. We also present a gaze estimation method based on epipolar geometry for binocular eye tracking setups. The depth compensation model has shown very promising to the field of eye tracking. It can reduce 10 times less the influence of parallax error in multiple depth planes.",
"fno": "5720a788",
"keywords": [
"Cameras",
"Gaze Tracking",
"Calibration",
"Training",
"Estimation",
"Geometry",
"Transmission Line Matrix Methods"
],
"authors": [
{
"affiliation": null,
"fullName": "Fabricio Batista Narcizo",
"givenName": "Fabricio Batista",
"surname": "Narcizo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dan Witzner Hansen",
"givenName": "Dan Witzner",
"surname": "Hansen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "788-795",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9711-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5720a780",
"articleId": "12OmNzFMFpF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5720a796",
"articleId": "12OmNrHjqLu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2017/3091/0/3091a026",
"title": "Estimating Gaze Depth Using Multi-Layer Perceptron",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a026/12OmNAkWvFD",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a351",
"title": "Gaze Estimation Using Human Joint Rotation Angel",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a351/12OmNx57HJj",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2018/6857/0/08643332",
"title": "Open framework for error-compensated gaze data collection with eye tracking glasses",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2018/08643332/17QjJdei3Y0",
"parentPublication": {
"id": "proceedings/ism/2018/6857/0",
"title": "2018 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisai/2021/0692/0/069200a651",
"title": "Gaze Estimation Based on Difference Residual Network",
"doi": null,
"abstractUrl": "/proceedings-article/cisai/2021/069200a651/1BmOqegCHjG",
"parentPublication": {
"id": "proceedings/cisai/2021/0692/0",
"title": "2021 International Conference on Computer Information Science and Artificial Intelligence (CISAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873986",
"title": "Weighted Pointer: Error-aware Gaze-based Interaction through Fallback Modalities",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873986/1GjwNuaj2ms",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2019/9151/0/08730846",
"title": "Gaze Estimation Using Residual Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2019/08730846/1aDSMwUBvBS",
"parentPublication": {
"id": "proceedings/percom-workshops/2019/9151/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2019/3263/0/08747333",
"title": "Depth From Texture Integration",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2019/08747333/1bcJvSXKrEA",
"parentPublication": {
"id": "proceedings/iccp/2019/3263/0",
"title": "2019 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090650",
"title": "Depth Augmented Omnidirectional Stereo for 6-DoF VR Photography",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090650/1jIxi5ANPS8",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvidl/2020/9481/0/948100a596",
"title": "Research on Kinect Calibration and Depth Error Compensation Based on BP Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvidl/2020/948100a596/1pbebnZ7Uje",
"parentPublication": {
"id": "proceedings/cvidl/2020/9481/0",
"title": "2020 International Conference on Computer Vision, Image and Deep Learning (CVIDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKiq0",
"title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"acronym": "percomw",
"groupId": "1000552",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VVho5z",
"doi": "10.1109/PERCOMW.2018.8480159",
"title": "Towards Gaze-Based Mobile Device Interaction for the Disabled",
"normalizedTitle": "Towards Gaze-Based Mobile Device Interaction for the Disabled",
"abstract": "Manual input is the dominant way to interact with mobile devices in everyday life. Innovations like multi-touch displays and virtual keyboards that allow user input by sliding a finger over the letters make this input more and more convenient. However, these systems assume that users have full control over their hand movements. This assumption excludes millions of people who suffer from loss of extremities, paralyses, or spasticities. In this paper, we motivate the urgent need for input mechanisms that make these technologies available for everybody. We focus on gaze-based interaction as eye movements can be tracked by built-in cameras of unmodified mobile devices without any further hardware requirements. As accurate point of gaze estimation on mobile devices is nontrivial, we propose a middleware that is not based on the absolute position of the pupils but rather takes eye movement patterns into account. This paper has four contributions. First, we motivate the need for alternative inputs methods besides manual input. Second, we discuss existing approaches and reveal their limitations. Third, we propose a middleware that allows gaze-based user interaction with mobile devices. Fourth, we provide a framework that allows developers to easily integrate gaze-based control into mobile applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Manual input is the dominant way to interact with mobile devices in everyday life. Innovations like multi-touch displays and virtual keyboards that allow user input by sliding a finger over the letters make this input more and more convenient. However, these systems assume that users have full control over their hand movements. This assumption excludes millions of people who suffer from loss of extremities, paralyses, or spasticities. In this paper, we motivate the urgent need for input mechanisms that make these technologies available for everybody. We focus on gaze-based interaction as eye movements can be tracked by built-in cameras of unmodified mobile devices without any further hardware requirements. As accurate point of gaze estimation on mobile devices is nontrivial, we propose a middleware that is not based on the absolute position of the pupils but rather takes eye movement patterns into account. This paper has four contributions. First, we motivate the need for alternative inputs methods besides manual input. Second, we discuss existing approaches and reveal their limitations. Third, we propose a middleware that allows gaze-based user interaction with mobile devices. Fourth, we provide a framework that allows developers to easily integrate gaze-based control into mobile applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Manual input is the dominant way to interact with mobile devices in everyday life. Innovations like multi-touch displays and virtual keyboards that allow user input by sliding a finger over the letters make this input more and more convenient. However, these systems assume that users have full control over their hand movements. This assumption excludes millions of people who suffer from loss of extremities, paralyses, or spasticities. In this paper, we motivate the urgent need for input mechanisms that make these technologies available for everybody. We focus on gaze-based interaction as eye movements can be tracked by built-in cameras of unmodified mobile devices without any further hardware requirements. As accurate point of gaze estimation on mobile devices is nontrivial, we propose a middleware that is not based on the absolute position of the pupils but rather takes eye movement patterns into account. This paper has four contributions. First, we motivate the need for alternative inputs methods besides manual input. Second, we discuss existing approaches and reveal their limitations. Third, we propose a middleware that allows gaze-based user interaction with mobile devices. Fourth, we provide a framework that allows developers to easily integrate gaze-based control into mobile applications.",
"fno": "08480159",
"keywords": [
"Mobile Handsets",
"Performance Evaluation",
"Cameras",
"Manuals",
"Market Research",
"Gaze Tracking",
"Privacy"
],
"authors": [
{
"affiliation": "University of Mannheim, Schloss, Mannheim, 68161, Germany",
"fullName": "Anton Wachner",
"givenName": "Anton",
"surname": "Wachner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Mannheim, Schloss, Mannheim, 68161, Germany",
"fullName": "Janick Edinger",
"givenName": "Janick",
"surname": "Edinger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Mannheim, Schloss, Mannheim, 68161, Germany",
"fullName": "Christian Becker",
"givenName": "Christian",
"surname": "Becker",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "percomw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "397-402",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3227-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08480227",
"articleId": "17D45W9KVFf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08480345",
"articleId": "17D45Wc1IKs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmi/2002/1834/0/18340261",
"title": "Active Gaze Tracking for Human-Robot Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icmi/2002/18340261/12OmNAGNCeq",
"parentPublication": {
"id": "proceedings/icmi/2002/1834/0",
"title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a789",
"title": "Human Computer Interaction with Head Pose, Eye Gaze and Body Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a789/12OmNASILS4",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2007/3056/0/30560280",
"title": "Interaction Without Gesture or Speech -- A Gaze Controlled AR System",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2007/30560280/12OmNCcKQtv",
"parentPublication": {
"id": "proceedings/icat/2007/3056/0",
"title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2018/4353/0/435301a144",
"title": "EyeTell: Video-Assisted Touchscreen Keystroke Inference from Eye Movements",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2018/435301a144/12OmNzC5SIa",
"parentPublication": {
"id": "proceedings/sp/2018/4353/0",
"title": "2018 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/1/4151a300",
"title": "A Novel Simple 2D Model of Eye Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151a300/12OmNzQR1nK",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/1",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09706357",
"title": "Towards High Performance Low Complexity Calibration in Appearance Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09706357/1AO2a7pgNPO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09802919",
"title": "Continuous Gaze Tracking With Implicit Saliency-Aware Calibration on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09802919/1Eo1vvDggH6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873986",
"title": "Weighted Pointer: Error-aware Gaze-based Interaction through Fallback Modalities",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873986/1GjwNuaj2ms",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2019/9151/0/08730846",
"title": "Gaze Estimation Using Residual Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2019/08730846/1aDSMwUBvBS",
"parentPublication": {
"id": "proceedings/percom-workshops/2019/9151/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2019/5606/0/560600a233",
"title": "A Scrolling Approach for Gaze-Based Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2019/560600a233/1gFJgv5B5O8",
"parentPublication": {
"id": "proceedings/ism/2019/5606/0",
"title": "2019 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1aDSuDp9DuU",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"acronym": "percom-workshops",
"groupId": "1000552",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1aDSMwUBvBS",
"doi": "10.1109/PERCOMW.2019.8730846",
"title": "Gaze Estimation Using Residual Neural Network",
"normalizedTitle": "Gaze Estimation Using Residual Neural Network",
"abstract": "Eye gaze tracking has become an prominent research topic in human-computer interaction and computer vision. It is due to its application in numerous fields, such as the market research, medical, neuroscience and psychology. Eye gaze tracking is implemented by estimating gaze (gaze estimation) for each individual frame in offline or real-time video captured. Therefore, in order to produce the secure the accurate tracking, especially in the emerging use in medical and community, innovation on the gaze estimation posts a challenge in research field. In this paper, we explored the use of the deep learning model, Residual Neural Network (ResNet-18), to predict the eye gaze on mobile device. The model is trained using the large-scale eye tracking public dataset called GazeCapture. We aim to innovate by incorporating methods/techniques of removing the blinking data, applying image histogram normalisation, head pose, and face grid features. As a result, we achieved 3.05cm average error, which is better performance than iTracker (4.11cm average error), the recent gaze tracking deep-learning model using AlexNet architecture. Upon observation, adaptive normalisation of the images was found to produce better results compared to histogram normalisation. Additionally, we found that head pose information was useful contribution to the proposed deep-learning network, while face grid information does not help to reduce test error.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Eye gaze tracking has become an prominent research topic in human-computer interaction and computer vision. It is due to its application in numerous fields, such as the market research, medical, neuroscience and psychology. Eye gaze tracking is implemented by estimating gaze (gaze estimation) for each individual frame in offline or real-time video captured. Therefore, in order to produce the secure the accurate tracking, especially in the emerging use in medical and community, innovation on the gaze estimation posts a challenge in research field. In this paper, we explored the use of the deep learning model, Residual Neural Network (ResNet-18), to predict the eye gaze on mobile device. The model is trained using the large-scale eye tracking public dataset called GazeCapture. We aim to innovate by incorporating methods/techniques of removing the blinking data, applying image histogram normalisation, head pose, and face grid features. As a result, we achieved 3.05cm average error, which is better performance than iTracker (4.11cm average error), the recent gaze tracking deep-learning model using AlexNet architecture. Upon observation, adaptive normalisation of the images was found to produce better results compared to histogram normalisation. Additionally, we found that head pose information was useful contribution to the proposed deep-learning network, while face grid information does not help to reduce test error.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Eye gaze tracking has become an prominent research topic in human-computer interaction and computer vision. It is due to its application in numerous fields, such as the market research, medical, neuroscience and psychology. Eye gaze tracking is implemented by estimating gaze (gaze estimation) for each individual frame in offline or real-time video captured. Therefore, in order to produce the secure the accurate tracking, especially in the emerging use in medical and community, innovation on the gaze estimation posts a challenge in research field. In this paper, we explored the use of the deep learning model, Residual Neural Network (ResNet-18), to predict the eye gaze on mobile device. The model is trained using the large-scale eye tracking public dataset called GazeCapture. We aim to innovate by incorporating methods/techniques of removing the blinking data, applying image histogram normalisation, head pose, and face grid features. As a result, we achieved 3.05cm average error, which is better performance than iTracker (4.11cm average error), the recent gaze tracking deep-learning model using AlexNet architecture. Upon observation, adaptive normalisation of the images was found to produce better results compared to histogram normalisation. Additionally, we found that head pose information was useful contribution to the proposed deep-learning network, while face grid information does not help to reduce test error.",
"fno": "08730846",
"keywords": [
"Computer Vision",
"Gaze Tracking",
"Image Capture",
"Learning Artificial Intelligence",
"Neural Nets",
"Pose Estimation",
"Residual Neural Network",
"Eye Gaze Tracking",
"Human Computer Interaction",
"Computer Vision",
"Neuroscience",
"Accurate Tracking",
"Deep Learning Model",
"Deep Learning Network",
"Gaze Estimation",
"Image Histogram Normalisation",
"Head Pose",
"Face Grid Features",
"Face",
"Estimation",
"Gaze Tracking",
"Histograms",
"Training",
"Ear",
"Eye Track",
"Mobile",
"Res Net",
"Deep Learning"
],
"authors": [
{
"affiliation": "Nanyang Technological University, School of Computer Science and Engineering), Singapore",
"fullName": "En Teng Wong",
"givenName": "En Teng",
"surname": "Wong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University, School of Computer Science and Engineering), Singapore",
"fullName": "Seanglidet Yean",
"givenName": "Seanglidet",
"surname": "Yean",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University, School of Computer Science and Engineering), Singapore",
"fullName": "Qingyao Hu",
"givenName": "Qingyao",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University, School of Computer Science and Engineering), Singapore",
"fullName": "Bu Sung Lee",
"givenName": "Bu Sung",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University, School of Computer Science and Engineering), Singapore",
"fullName": "Jigang Liu",
"givenName": "Jigang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University, School of Computer Science and Engineering), Singapore",
"fullName": "Rajan Deepu",
"givenName": "Rajan",
"surname": "Deepu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "percom-workshops",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "411-414",
"year": "2019",
"issn": null,
"isbn": "978-1-5386-9151-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08730856",
"articleId": "1aDSv3zWo7K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08730570",
"articleId": "1aDSI9HrVe0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a048",
"title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000327",
"title": "Gaze tracking as a game input interface",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000327/12OmNxRWIeo",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/02/07414495",
"title": "Fauxvea: Crowdsourcing Gaze Location Estimates for Visualization Analysis Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2017/02/07414495/13rRUwInvyE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956312",
"title": "A Joint Cascaded Framework for Simultaneous Eye State, Eye Center, and Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956312/1IHq8em8jug",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a375",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a375/1JrQRCijhMk",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a655",
"title": "A Multi-Modal Gaze Tracking Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a655/1ehBL8sk06I",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d660",
"title": "U2Eyes: A Binocular Dataset for Eye Tracking and Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d660/1i5mrEVhtbq",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412205",
"title": "Adaptive Feature Fusion Network for Gaze Tracking in Mobile Tablets",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412205/1tmjcNMinsc",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyen1y3",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"acronym": "aciiw",
"groupId": "1823084",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBd9T0n",
"doi": "10.1109/ACIIW.2017.8272594",
"title": "Deep breaths: An internally- and externally-paced deep breathing guide",
"normalizedTitle": "Deep breaths: An internally- and externally-paced deep breathing guide",
"abstract": "Deep breathing is a simple and intuitive technique for reducing stress, but requires familiarity with breathing exercises and suitable breathing parameters. We present Deep Breaths, a mobile tool that allows users to experiment with various respiratory pacing signals in order to maximize relaxation. Deep Breaths provides a stationary (i.e., clock-based) pacing signal as well as an adaptive pacing signal that follows fluctuations in the users heart rate. Deep Breaths also provides real-time visualizations of various standard measures of relaxation. This demonstration aims to illustrate how our system can be used for relaxation training.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Deep breathing is a simple and intuitive technique for reducing stress, but requires familiarity with breathing exercises and suitable breathing parameters. We present Deep Breaths, a mobile tool that allows users to experiment with various respiratory pacing signals in order to maximize relaxation. Deep Breaths provides a stationary (i.e., clock-based) pacing signal as well as an adaptive pacing signal that follows fluctuations in the users heart rate. Deep Breaths also provides real-time visualizations of various standard measures of relaxation. This demonstration aims to illustrate how our system can be used for relaxation training.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Deep breathing is a simple and intuitive technique for reducing stress, but requires familiarity with breathing exercises and suitable breathing parameters. We present Deep Breaths, a mobile tool that allows users to experiment with various respiratory pacing signals in order to maximize relaxation. Deep Breaths provides a stationary (i.e., clock-based) pacing signal as well as an adaptive pacing signal that follows fluctuations in the users heart rate. Deep Breaths also provides real-time visualizations of various standard measures of relaxation. This demonstration aims to illustrate how our system can be used for relaxation training.",
"fno": "08272594",
"keywords": [
"Heart Rate Variability",
"Biological Control Systems",
"Stress",
"Heart Beat",
"Tools",
"Visualization"
],
"authors": [
{
"affiliation": "Department of Computer Science and Engineering, Texas A&M University, College Station, Texas, USA",
"fullName": "Adam Hair",
"givenName": "Adam",
"surname": "Hair",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Texas A&M University, College Station, Texas, USA",
"fullName": "Ricardo Gutierrez-Osuna",
"givenName": "Ricardo",
"surname": "Gutierrez-Osuna",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aciiw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "85-87",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-0680-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08272593",
"articleId": "12OmNAgGwfX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08272595",
"articleId": "12OmNrMHOgh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmla/2016/6167/0/07838258",
"title": "Premature Ventricular Contraction Beat Detection with Deep Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icmla/2016/07838258/12OmNCdk2wz",
"parentPublication": {
"id": "proceedings/icmla/2016/6167/0",
"title": "2016 15th IEEE International Conference on Machine Learning and Applications (ICMLA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdata-congress/2014/5057/0/06906817",
"title": "A Rule-Based Temporal Analysis Method for Online Health Analytics and Its Application for Real-Time Detection of Neonatal Spells",
"doi": null,
"abstractUrl": "/proceedings-article/bigdata-congress/2014/06906817/12OmNs0kytS",
"parentPublication": {
"id": "proceedings/bigdata-congress/2014/5057/0",
"title": "2014 IEEE International Congress on Big Data (BigData Congress)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2017/0563/0/08273661",
"title": "Wear your heart on your sleeve: Visible psychophysiology for contextualized relaxation",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273661/12OmNwKoZdU",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cerma/2008/3320/0/3320a592",
"title": "Processing of ECG and Breathing Signals to Study the Correlation of Respiration Waveform Time Intervals with HF and LF Powers of Heart Rate Variability",
"doi": null,
"abstractUrl": "/proceedings-article/cerma/2008/3320a592/12OmNx3HI3C",
"parentPublication": {
"id": "proceedings/cerma/2008/3320/0",
"title": "Electronics, Robotics and Automotive Mechanics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rvsp/2013/3184/0/3184a182",
"title": "A Stress Evaluation and Personal Relaxation System Based on Measurement of Photoplethysmography",
"doi": null,
"abstractUrl": "/proceedings-article/rvsp/2013/3184a182/12OmNxd4tnP",
"parentPublication": {
"id": "proceedings/rvsp/2013/3184/0",
"title": "2013 Second International Conference on Robot, Vision and Signal Processing (RVSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/01/08400398",
"title": "Partial Reinforcement in Game Biofeedback for Relaxation Training",
"doi": null,
"abstractUrl": "/journal/ta/2021/01/08400398/13rRUEgarzS",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2019/02/07930503",
"title": "Visual Biofeedback and Game Adaptation in Relaxation Skill Transfer",
"doi": null,
"abstractUrl": "/journal/ta/2019/02/07930503/13rRUwInvdq",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2016/02/07164293",
"title": "ReBreathe: A Calibration Protocol that Improves Stress/Relax Classification by Relabeling Deep Breathing Relaxation Exercises",
"doi": null,
"abstractUrl": "/journal/ta/2016/02/07164293/13rRUxASufL",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsd/2018/7377/0/737700a421",
"title": "Inter-Patient ECG Classification Using Deep Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dsd/2018/737700a421/17D45Xh13wQ",
"parentPublication": {
"id": "proceedings/dsd/2018/7377/0",
"title": "2018 21st Euromicro Conference on Digital System Design (DSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2020/03/08319498",
"title": "Gaming Away Stress: Using Biofeedback Games to Learn Paced Breathing",
"doi": null,
"abstractUrl": "/journal/ta/2020/03/08319498/1mhPDEEhlLi",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdXjsLKBG",
"doi": "10.1109/VRW55335.2022.00008",
"title": "A Cardboard-Based Virtual Reality Study on Self-Avatar Appearance and Breathing",
"normalizedTitle": "A Cardboard-Based Virtual Reality Study on Self-Avatar Appearance and Breathing",
"abstract": "Cardboard-based virtual reality is an affordable solution for experiencing virtual reality content. Particularly during the COVID-19 pandemic, several studies used cardboard-based virtual reality remotely to minimize viral spread. We conducted a study to explore the potentials of low-cost virtual reality on participants' sense of presence and body ownership illusion in our research lab, thereby providing a controlled research setting. Our 2 (Avatar: realistic vs. mannequin self-avatar) × 2 (Breathing; breathing vs. no breathing motion) study investigated presence and body ownership when participants were instructed to observe a virtual environment passively through a cardboard-based virtual reality application while being embodied as a self-avatar. Our study's results indicated that: (1) the mannequin self-avatar exerted a stronger effect on participants' presence; (2) younger participants who experienced the mannequin avatar reported stronger body ownership compared with older participants; and (3) while experiencing a mannequin avatar with no breathing motion, participants with prior VR experience reported higher body ownership illusion compared with participants with no prior VR experience. In this paper, we discuss our findings, as well as the study's limitations and future research directions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cardboard-based virtual reality is an affordable solution for experiencing virtual reality content. Particularly during the COVID-19 pandemic, several studies used cardboard-based virtual reality remotely to minimize viral spread. We conducted a study to explore the potentials of low-cost virtual reality on participants' sense of presence and body ownership illusion in our research lab, thereby providing a controlled research setting. Our 2 (Avatar: realistic vs. mannequin self-avatar) × 2 (Breathing; breathing vs. no breathing motion) study investigated presence and body ownership when participants were instructed to observe a virtual environment passively through a cardboard-based virtual reality application while being embodied as a self-avatar. Our study's results indicated that: (1) the mannequin self-avatar exerted a stronger effect on participants' presence; (2) younger participants who experienced the mannequin avatar reported stronger body ownership compared with older participants; and (3) while experiencing a mannequin avatar with no breathing motion, participants with prior VR experience reported higher body ownership illusion compared with participants with no prior VR experience. In this paper, we discuss our findings, as well as the study's limitations and future research directions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cardboard-based virtual reality is an affordable solution for experiencing virtual reality content. Particularly during the COVID-19 pandemic, several studies used cardboard-based virtual reality remotely to minimize viral spread. We conducted a study to explore the potentials of low-cost virtual reality on participants' sense of presence and body ownership illusion in our research lab, thereby providing a controlled research setting. Our 2 (Avatar: realistic vs. mannequin self-avatar) × 2 (Breathing; breathing vs. no breathing motion) study investigated presence and body ownership when participants were instructed to observe a virtual environment passively through a cardboard-based virtual reality application while being embodied as a self-avatar. Our study's results indicated that: (1) the mannequin self-avatar exerted a stronger effect on participants' presence; (2) younger participants who experienced the mannequin avatar reported stronger body ownership compared with older participants; and (3) while experiencing a mannequin avatar with no breathing motion, participants with prior VR experience reported higher body ownership illusion compared with participants with no prior VR experience. In this paper, we discuss our findings, as well as the study's limitations and future research directions.",
"fno": "840200a001",
"keywords": [
"Avatars",
"Virtual Environment",
"Cardboard Based Virtual Reality Application",
"Mannequin Avatar",
"Stronger Body Ownership",
"Higher Body Ownership Illusion",
"Self Avatar Appearance",
"Virtual Reality Content",
"Low Cost Virtual Reality",
"COVID 19 Pandemic",
"Participant Sense Of Presence",
"Mannequin Self Avatar",
"Realistic Self Avatar",
"No Breathing Motion",
"Breathing Motion",
"COVID 19",
"Three Dimensional Displays",
"Pandemics",
"Conferences",
"Avatars",
"Virtual Environments",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Purdue University,Department of Computer Graphics Technology,West Lafayette,Indiana,USA,47907",
"fullName": "Dixuan Cui",
"givenName": "Dixuan",
"surname": "Cui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University,Department of Computer Graphics Technology,West Lafayette,Indiana,USA,47907",
"fullName": "Christos Mousas",
"givenName": "Christos",
"surname": "Mousas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200z032",
"articleId": "1CJeJdpu6vC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a007",
"articleId": "1CJdQtzGYyk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504689",
"title": "The impact of a self-avatar on cognitive load in immersive virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892241",
"title": "Prism aftereffects for throwing with a self-avatar in an immersive virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892241/12OmNxy4N0w",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504761",
"title": "Avatar realism and social interaction quality in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a057",
"title": "Visual Fidelity Effects on Expressive Self-avatar in Virtual Reality: First Impressions Matter",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a057/1CJc41zMnFC",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a352",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a352/1J7WodvTPzy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049676",
"title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089510",
"title": "The Self-Avatar Follower Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089510/1jIxamWhlT2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a127",
"title": "Evidence for a Relationship Between Self-Avatar Fixations and Perceived Avatar Similarity within Low-Cost Virtual Reality Embodiment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a127/1tnXDDh8sqk",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a087",
"title": "Distance Estimation with Social Distancing: A Mobile Augmented Reality Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a087/1yeQVHZQO8U",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qTrKNlwI0w",
"title": "2021 International Conference on Information Networking (ICOIN)",
"acronym": "icoin",
"groupId": "1000363",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1qTrROLD2Io",
"doi": "10.1109/ICOIN50884.2021.9334014",
"title": "In-Vehicle Passenger Detection Using FMCW Radar",
"normalizedTitle": "In-Vehicle Passenger Detection Using FMCW Radar",
"abstract": "In this paper, we suggest features for passenger detection inside the vehicle using frequency modulated continuous wave (FMCW) radar. In radar time-frequency spectrum, the magnitude variation of a person is caused by the physiological movements such as breathing and heartbeat. To quantify the physiological movements, the power of respiratory frequency band (0.1-0.4 Hz) and heartbeat frequency band (0.8-1.7 Hz) is used. We experimentally compare the proposed features under presence and absence of a person using FMCW radar signal acquired inside the vehicle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we suggest features for passenger detection inside the vehicle using frequency modulated continuous wave (FMCW) radar. In radar time-frequency spectrum, the magnitude variation of a person is caused by the physiological movements such as breathing and heartbeat. To quantify the physiological movements, the power of respiratory frequency band (0.1-0.4 Hz) and heartbeat frequency band (0.8-1.7 Hz) is used. We experimentally compare the proposed features under presence and absence of a person using FMCW radar signal acquired inside the vehicle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we suggest features for passenger detection inside the vehicle using frequency modulated continuous wave (FMCW) radar. In radar time-frequency spectrum, the magnitude variation of a person is caused by the physiological movements such as breathing and heartbeat. To quantify the physiological movements, the power of respiratory frequency band (0.1-0.4 Hz) and heartbeat frequency band (0.8-1.7 Hz) is used. We experimentally compare the proposed features under presence and absence of a person using FMCW radar signal acquired inside the vehicle.",
"fno": "09334014",
"keywords": [
"CW Radar",
"FM Radar",
"Radar Signal Processing",
"Signal Detection",
"Physiological Movements",
"Breathing",
"Heartbeat",
"Respiratory Frequency Band",
"FMCW Radar Signal",
"Frequency Modulated Continuous Wave Radar",
"Radar Time Frequency Spectrum",
"Magnitude Variation",
"In Vehicle Passenger Detection",
"Frequency 0 1 Hz To 0 4 Hz",
"Frequency 0 8 Hz To 1 7 Hz",
"Time Frequency Analysis",
"Frequency Modulation",
"Heart Beat",
"Radar Detection",
"Radar",
"Feature Extraction",
"Physiology",
"Automotive Radar",
"FMCW Radar",
"Radar Signal Processing",
"Passenger Detection",
"Occupancy Detection"
],
"authors": [
{
"affiliation": "Soongsil University,Department of Electronic Engineering,Seoul,South Korea",
"fullName": "Heemang Song",
"givenName": "Heemang",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Soongsil University,Department of Electronic Engineering,Seoul,South Korea",
"fullName": "Youngkeun Yoo",
"givenName": "Youngkeun",
"surname": "Yoo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Soongsil University,Department of Electronic Engineering,Seoul,South Korea",
"fullName": "Hyun-Chool Shin",
"givenName": "Hyun-Chool",
"surname": "Shin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icoin",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "644-647",
"year": "2021",
"issn": "1976-7684",
"isbn": "978-1-7281-9101-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09333873",
"articleId": "1qTrUTZ1xkY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09333950",
"articleId": "1qTrP6pvIxW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cyberc/2015/9200/0/9200a399",
"title": "Doppler Shift and Height Detection of Obstacle Based on FMCW Radar Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2015/9200a399/12OmNBPtJDj",
"parentPublication": {
"id": "proceedings/cyberc/2015/9200/0",
"title": "2015 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cnsi/2011/4417/0/4417a129",
"title": "Performance Analysis of FMCW-UWB Radar for Oil Tank Level Gauge",
"doi": null,
"abstractUrl": "/proceedings-article/cnsi/2011/4417a129/12OmNBQkwZf",
"parentPublication": {
"id": "proceedings/cnsi/2011/4417/0",
"title": "Computers, Networks, Systems and Industrial Engineering, ACIS/JNU International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682304",
"title": "Design Procedures and Considerations of FOD Detection Millimeter-Wave FMCW Radar",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682304/12OmNs0C9AO",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/1/3583a528",
"title": "Study on Signal Processing of FMCW Ground Penetrating Radar",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583a528/12OmNxveNIL",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/3",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccce/2016/2427/0/2427a396",
"title": "FMCW Radar for Slow Moving Target Detection: Design and Performance Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iccce/2016/2427a396/12OmNyO8tSC",
"parentPublication": {
"id": "proceedings/iccce/2016/2427/0",
"title": "2016 International Conference on Computer and Communication Engineering (ICCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000b050",
"title": "A Portable 24GHz Doppler Radar System for Distant Human Vital Sign Monitoring",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000b050/17D45Xh13s0",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2023/7578/0/757800a389",
"title": "Contactless Vital Signs Tracking with mmWave RADAR in Realtime",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2023/757800a389/1LFLA3HTI52",
"parentPublication": {
"id": "proceedings/bigcomp/2023/7578/0",
"title": "2023 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisce/2019/3681/0/368100a239",
"title": "Moving Target Detection Using the 2D-FFT Algorithm for Automotive FMCW Radars",
"doi": null,
"abstractUrl": "/proceedings-article/cisce/2019/368100a239/1cI60611yG4",
"parentPublication": {
"id": "proceedings/cisce/2019/3681/0",
"title": "2019 International Conference on Communications, Information System and Computer Engineering (CISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000b436",
"title": "A Range Estimator of a Stationary Human among Stationary Clutter for Vital FMCW Radar",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000b436/1gjRCI2YuIw",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2021/9101/0/09334023",
"title": "Vital information extraction using FMCW radar",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2021/09334023/1qTrNwOCyME",
"parentPublication": {
"id": "proceedings/icoin/2021/9101/0",
"title": "2021 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCmpcNB",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"acronym": "iiaiaai",
"groupId": "1801921",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAnMuFg",
"doi": "10.1109/IIAI-AAI.2014.177",
"title": "Development of Projection Mapping with Utility of Digital Signage",
"normalizedTitle": "Development of Projection Mapping with Utility of Digital Signage",
"abstract": "Recently, Projection Mapping is attracting attention as a new approach for visual expression method. In order to perform large-scale Projection Mapping, multiple projectors are required. In this paper, we propose a new representation technique by various attempts. To determine more impressive visual expression, we have developed Projection Mapping and devised Prodigious Mapping. We have set the projector not to project from many directions, but from one direction. We have been pursuing the possibility that we could use Projection Mapping not only as one of the movie technique but also as a practical approach into a wide range of fields, such as coordination of the shopping mall as regional contribution, transmission of academic contents in the science museum, projection to the historic building to inform the history and culture and so on. We developed the new projection method, \"Gem Mapping\", specializing for indoor settings. In this method, we make objections made with white boards and set them on a wall inside of a building. We create big gems on the wall by projecting video to the objects. Gem Mapping can set inside the building permanently and reduce the costs. We use touch sensor to detect the motions of people and change images to create interactive Projection Mapping. Gem Mapping is a new advertising media to be used in a lot of commercial facilities as digital signage.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recently, Projection Mapping is attracting attention as a new approach for visual expression method. In order to perform large-scale Projection Mapping, multiple projectors are required. In this paper, we propose a new representation technique by various attempts. To determine more impressive visual expression, we have developed Projection Mapping and devised Prodigious Mapping. We have set the projector not to project from many directions, but from one direction. We have been pursuing the possibility that we could use Projection Mapping not only as one of the movie technique but also as a practical approach into a wide range of fields, such as coordination of the shopping mall as regional contribution, transmission of academic contents in the science museum, projection to the historic building to inform the history and culture and so on. We developed the new projection method, \"Gem Mapping\", specializing for indoor settings. In this method, we make objections made with white boards and set them on a wall inside of a building. We create big gems on the wall by projecting video to the objects. Gem Mapping can set inside the building permanently and reduce the costs. We use touch sensor to detect the motions of people and change images to create interactive Projection Mapping. Gem Mapping is a new advertising media to be used in a lot of commercial facilities as digital signage.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recently, Projection Mapping is attracting attention as a new approach for visual expression method. In order to perform large-scale Projection Mapping, multiple projectors are required. In this paper, we propose a new representation technique by various attempts. To determine more impressive visual expression, we have developed Projection Mapping and devised Prodigious Mapping. We have set the projector not to project from many directions, but from one direction. We have been pursuing the possibility that we could use Projection Mapping not only as one of the movie technique but also as a practical approach into a wide range of fields, such as coordination of the shopping mall as regional contribution, transmission of academic contents in the science museum, projection to the historic building to inform the history and culture and so on. We developed the new projection method, \"Gem Mapping\", specializing for indoor settings. In this method, we make objections made with white boards and set them on a wall inside of a building. We create big gems on the wall by projecting video to the objects. Gem Mapping can set inside the building permanently and reduce the costs. We use touch sensor to detect the motions of people and change images to create interactive Projection Mapping. Gem Mapping is a new advertising media to be used in a lot of commercial facilities as digital signage.",
"fno": "06913421",
"keywords": [
"Image Color Analysis",
"Buildings",
"Timing",
"Music",
"Motion Pictures",
"Shape",
"Software",
"Projective Technique",
"Computer Graphics",
"Mapping",
"Gem Mapping",
"Projection Mapping"
],
"authors": [
{
"affiliation": null,
"fullName": "Shota Murayama",
"givenName": "Shota",
"surname": "Murayama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ippei Torii",
"givenName": "Ippei",
"surname": "Torii",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Naohiro Ishii",
"givenName": "Naohiro",
"surname": "Ishii",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iiaiaai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "895-900",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4174-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06913420",
"articleId": "12OmNAlvHBR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06913422",
"articleId": "12OmNrJRPcz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223330",
"title": "Robust high-speed tracking against illumination changes for dynamic projection mapping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223330/12OmNCdk2JE",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2014/4284/0/4284a173",
"title": "Texture Mapping Based on Projection and Viewpoints",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a173/12OmNvjgWVu",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/culture-computing/2015/8232/0/8232a018",
"title": "Projection Mapping Celebrating RIMPA 400th Anniversary",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2015/8232a018/12OmNvonIKT",
"parentPublication": {
"id": "proceedings/culture-computing/2015/8232/0",
"title": "2015 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2015/7935/0/07050851",
"title": "SPARQL based mapping management",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2015/07050851/12OmNyiUBqe",
"parentPublication": {
"id": "proceedings/icsc/2015/7935/0",
"title": "2015 IEEE International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802105",
"title": "Geometrically-correct projection-based texture mapping onto a cloth",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802105/12OmNzVXNZG",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007312",
"title": "FaceForge: Markerless Non-Rigid Face Multi-Projection Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007312/13rRUwInvyG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a756",
"title": "Robust Tangible Projection Mapping with Multi-View Contour-Based Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a756/1CJeF1WYP1m",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2019/4021/0/402100a121",
"title": "A Proposal of Interactive Projection Mapping by Touching Rays Visualized by Smoke",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2019/402100a121/1grPm3jYeMU",
"parentPublication": {
"id": "proceedings/nicoint/2019/4021/0",
"title": "2019 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a597",
"title": "Dynamic Projection Mapping with 3D Images Using Volumetric Display",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a597/1tnX0LxdiuI",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiea/2021/3265/0/326500a157",
"title": "Research on the Methods of Panoramic Video Projection Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/aiea/2021/326500a157/1wzsE6D2J6U",
"parentPublication": {
"id": "proceedings/aiea/2021/3265/0",
"title": "2021 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysikN6QOQ",
"doi": "10.1109/ISMAR-Adjunct.2019.00-33",
"title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System",
"normalizedTitle": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System",
"abstract": "Projector-camera systems are commonly used for projector calibration. Conventional methods usually use stationary cameras and temporal coded structured light (SL). In this paper, we propose a projector calibration method using a mobile camera and spatial coded SL. Our method allows the users to use a handheld camera to carry out projector calibration and therefore reduce the effort and time required for camera setup. Although the decoding of temporal coded SL can be error-prone in a real-world situation, our method can achieve robust calibration results by taking advantage of the multi-view observations of the projection thanks to the mobility of the camera. Experiments show that the result of our method is comparable with that of a checkerboard-based approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Projector-camera systems are commonly used for projector calibration. Conventional methods usually use stationary cameras and temporal coded structured light (SL). In this paper, we propose a projector calibration method using a mobile camera and spatial coded SL. Our method allows the users to use a handheld camera to carry out projector calibration and therefore reduce the effort and time required for camera setup. Although the decoding of temporal coded SL can be error-prone in a real-world situation, our method can achieve robust calibration results by taking advantage of the multi-view observations of the projection thanks to the mobility of the camera. Experiments show that the result of our method is comparable with that of a checkerboard-based approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Projector-camera systems are commonly used for projector calibration. Conventional methods usually use stationary cameras and temporal coded structured light (SL). In this paper, we propose a projector calibration method using a mobile camera and spatial coded SL. Our method allows the users to use a handheld camera to carry out projector calibration and therefore reduce the effort and time required for camera setup. Although the decoding of temporal coded SL can be error-prone in a real-world situation, our method can achieve robust calibration results by taking advantage of the multi-view observations of the projection thanks to the mobility of the camera. Experiments show that the result of our method is comparable with that of a checkerboard-based approach.",
"fno": "476500a261",
"keywords": [
"Calibration",
"Cameras",
"Decoding",
"Image Coding",
"Optical Projectors",
"Handheld Camera",
"Projector Calibration Method",
"Mobile Camera",
"Projection Mapping System",
"Projector Camera Systems",
"Stationary Cameras",
"Temporal Coded Structured Light",
"Spatial Coded SL",
"Temporal Coded SL Decoding",
"Checkerboard Based Approach",
"Cameras",
"Calibration",
"Diamond",
"Three Dimensional Displays",
"Feature Extraction",
"Image Reconstruction",
"Surface Reconstruction",
"Projector Camera System",
"Projector Calibration",
"Projection Mapping",
"Augmented Reality"
],
"authors": [
{
"affiliation": "University of Tsukuba",
"fullName": "Chun Xie",
"givenName": "Chun",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tsukuba",
"fullName": "Hidehiko Shishido",
"givenName": "Hidehiko",
"surname": "Shishido",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tsukuba",
"fullName": "Yoshinari Kameda",
"givenName": "Yoshinari",
"surname": "Kameda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tsukuba",
"fullName": "Itaru Kitahara",
"givenName": "Itaru",
"surname": "Kitahara",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "261-262",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a259",
"articleId": "1gysj1o4L16",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a263",
"articleId": "1gysjVP83qo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223400",
"title": "Semi-automatic calibration of a projector-camera system using arbitrary objects with known geometry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223400/12OmNBJw9RK",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981726",
"title": "Fully automatic multi-projector calibration with an uncalibrated camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a042",
"title": "Robust Geometric Self-Calibration of Generic Multi-Projector Camera Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a042/12OmNCbCrRh",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204317",
"title": "Geometric video projector auto-calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a464",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a320",
"title": "Active Calibration of Camera-Projector Systems Based on Planar Homography",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460078",
"title": "Calibration-free projector-camera system for spatial augmented reality on planar surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460078/12OmNzUxO4G",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699178",
"title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysyl9FDhu",
"doi": "10.1109/ISMAR50242.2020.00039",
"title": "Real-Time Adaptive Color Correction in Dynamic Projection Mapping",
"normalizedTitle": "Real-Time Adaptive Color Correction in Dynamic Projection Mapping",
"abstract": "Projection mapping augments a real-world object's appearance by projecting digital content on its surface. However, a remaining obstacle to immersive projection mapping is the limitation to white Lambertian surfaces and uniform neutral environment light, if any. Violating one of these assumptions results in a discernible difference between the source material and the appearance of the projected content. For example, some colors may not be visible due to intense environment lighting or pronounced surface colors. We present a system that actively subdues many of those real-world influences, especially environment lighting. Our system supports dynamic (i.e., movable) target objects as well as changing lighting conditions while requiring no prior color calibration of the projector nor any precomputed environment probing. We automatically and continuously estimate these influences during runtime in a real-time feedback-loop and adjust the projected colors accordingly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Projection mapping augments a real-world object's appearance by projecting digital content on its surface. However, a remaining obstacle to immersive projection mapping is the limitation to white Lambertian surfaces and uniform neutral environment light, if any. Violating one of these assumptions results in a discernible difference between the source material and the appearance of the projected content. For example, some colors may not be visible due to intense environment lighting or pronounced surface colors. We present a system that actively subdues many of those real-world influences, especially environment lighting. Our system supports dynamic (i.e., movable) target objects as well as changing lighting conditions while requiring no prior color calibration of the projector nor any precomputed environment probing. We automatically and continuously estimate these influences during runtime in a real-time feedback-loop and adjust the projected colors accordingly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Projection mapping augments a real-world object's appearance by projecting digital content on its surface. However, a remaining obstacle to immersive projection mapping is the limitation to white Lambertian surfaces and uniform neutral environment light, if any. Violating one of these assumptions results in a discernible difference between the source material and the appearance of the projected content. For example, some colors may not be visible due to intense environment lighting or pronounced surface colors. We present a system that actively subdues many of those real-world influences, especially environment lighting. Our system supports dynamic (i.e., movable) target objects as well as changing lighting conditions while requiring no prior color calibration of the projector nor any precomputed environment probing. We automatically and continuously estimate these influences during runtime in a real-time feedback-loop and adjust the projected colors accordingly.",
"fno": "850800a174",
"keywords": [
"Calibration",
"Image Colour Analysis",
"Lighting",
"Optical Projectors",
"Time Adaptive Color Correction",
"Dynamic Projection Mapping",
"Real World Object",
"Digital Content",
"Immersive Projection Mapping",
"White Lambertian Surfaces",
"Uniform Neutral Environment Light",
"Assumptions Results",
"Discernible Difference",
"Source Material",
"Projected Content",
"Intense Environment Lighting",
"Pronounced Surface Colors",
"Lighting Conditions",
"Prior Color Calibration",
"Precomputed Environment Probing",
"Real Time Feedback Loop",
"Projected Colors",
"Runtime",
"Lighting",
"Color",
"Cameras",
"Real Time Systems",
"Calibration",
"Systems Support",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Mixed Augmented Reality",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Perception"
],
"authors": [
{
"affiliation": "Friedrich-Alexander University Erlangen-Nuremberg,Visual Computing",
"fullName": "Philipp Kurth",
"givenName": "Philipp",
"surname": "Kurth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nuremberg,Visual Computing",
"fullName": "Vanessa Lange",
"givenName": "Vanessa",
"surname": "Lange",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nuremberg,Visual Computing",
"fullName": "Marc Stamminger",
"givenName": "Marc",
"surname": "Stamminger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nuremberg,Visual Computing",
"fullName": "Frank Bauer",
"givenName": "Frank",
"surname": "Bauer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "174-184",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a164",
"articleId": "1pysuGClQ9a",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a185",
"articleId": "1pysx79ZXcA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391a298",
"title": "Beyond White: Ground Truth Colors for Color Constancy Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a298/12OmNqBKTY8",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/worv/2013/5646/0/06521924",
"title": "Color-based detection robust to varying illumination spectrum",
"doi": null,
"abstractUrl": "/proceedings-article/worv/2013/06521924/12OmNqJq4EZ",
"parentPublication": {
"id": "proceedings/worv/2013/5646/0",
"title": "2013 IEEE Workshop on Robot Vision (WORV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223335",
"title": "Light field projection for lighting reproduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223335/12OmNs0C9XZ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mcsi/2014/4324/0/4324a154",
"title": "Skin Color Analysis and Segmentation in Complex Outdoor Background",
"doi": null,
"abstractUrl": "/proceedings-article/mcsi/2014/4324a154/12OmNvlPkEk",
"parentPublication": {
"id": "proceedings/mcsi/2014/4324/0",
"title": "2014 International Conference on Mathematics and Computers in Sciences and in Industry (MCSI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichit/2006/2674/1/04021123",
"title": "A Color Correction System using a Color Compensation Chart",
"doi": null,
"abstractUrl": "/proceedings-article/ichit/2006/04021123/12OmNxWLTDY",
"parentPublication": {
"id": "proceedings/ichit/2006/2674/1",
"title": "2006 International Conference on Hybrid Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2013/4983/0/4983a210",
"title": "Exploiting Color Constancy for Compensating Projected Images on Non-white Light Projection Screen",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2013/4983a210/12OmNzYwc4C",
"parentPublication": {
"id": "proceedings/crv/2013/4983/0",
"title": "2013 International Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444808",
"title": "More than meets the eye: An engineering study to empirically examine the blending of real and virtual color spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444808/12OmNzlD9ih",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2009/3883/0/3883a113",
"title": "Progressive Image Color Neutralization Based on Adaptive Histogram Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2009/3883a113/12OmNzw8iX4",
"parentPublication": {
"id": "proceedings/icig/2009/3883/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/02/ttg2013020236",
"title": "Truthful Color Reproduction in Spatial Augmented Reality Applications",
"doi": null,
"abstractUrl": "/journal/tg/2013/02/ttg2013020236/13rRUxBJhmR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2022/6908/0/690800a021",
"title": "Perceptual Control of Food Taste with Projection Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2022/690800a021/1FWmZYvi4MM",
"parentPublication": {
"id": "proceedings/nicoint/2022/6908/0",
"title": "2022 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvEyR7P",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "4",
"displayVolume": "4",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAqU4UU",
"doi": "10.1109/ICPR.2006.642",
"title": "Human-Robot Interaction by Whole Body Gesture Spotting and Recognition",
"normalizedTitle": "Human-Robot Interaction by Whole Body Gesture Spotting and Recognition",
"abstract": "An intelligent robot is required for natural interaction with humans. Visual interpretation of gestures can be useful in accomplishing natural Human-Robot Interaction (HRI). Previous HRI research focused on issues such as hand gesture, sign language, and command gesture recognition. Automatic recognition of whole body gestures is required in order for HRI to operate naturally. This presents a challenging problem, because describing and modeling meaningful gesture patterns from whole body gestures, is a complex task. This paper presents a new method for recognition of whole body key gestures in HRI. A human subject is first described by a set of features, encoding the angular relationship between a dozen body parts in 3D. A feature vector is then mapped to a codeword of gesture HMMs. In order to spot key gestures accurately, a sophisticated method of designing a garbage gesture model is proposed; model reduction, which merges similar states, based on data-dependent statistics and relative entropy. The proposed method has been tested with 20 persons' samples and 200 synthetic data. The proposed method achieved a reliability rate of 94.8% in spotting task and a recognition rate of 97.4% from an isolated gesture.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An intelligent robot is required for natural interaction with humans. Visual interpretation of gestures can be useful in accomplishing natural Human-Robot Interaction (HRI). Previous HRI research focused on issues such as hand gesture, sign language, and command gesture recognition. Automatic recognition of whole body gestures is required in order for HRI to operate naturally. This presents a challenging problem, because describing and modeling meaningful gesture patterns from whole body gestures, is a complex task. This paper presents a new method for recognition of whole body key gestures in HRI. A human subject is first described by a set of features, encoding the angular relationship between a dozen body parts in 3D. A feature vector is then mapped to a codeword of gesture HMMs. In order to spot key gestures accurately, a sophisticated method of designing a garbage gesture model is proposed; model reduction, which merges similar states, based on data-dependent statistics and relative entropy. The proposed method has been tested with 20 persons' samples and 200 synthetic data. The proposed method achieved a reliability rate of 94.8% in spotting task and a recognition rate of 97.4% from an isolated gesture.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An intelligent robot is required for natural interaction with humans. Visual interpretation of gestures can be useful in accomplishing natural Human-Robot Interaction (HRI). Previous HRI research focused on issues such as hand gesture, sign language, and command gesture recognition. Automatic recognition of whole body gestures is required in order for HRI to operate naturally. This presents a challenging problem, because describing and modeling meaningful gesture patterns from whole body gestures, is a complex task. This paper presents a new method for recognition of whole body key gestures in HRI. A human subject is first described by a set of features, encoding the angular relationship between a dozen body parts in 3D. A feature vector is then mapped to a codeword of gesture HMMs. In order to spot key gestures accurately, a sophisticated method of designing a garbage gesture model is proposed; model reduction, which merges similar states, based on data-dependent statistics and relative entropy. The proposed method has been tested with 20 persons' samples and 200 synthetic data. The proposed method achieved a reliability rate of 94.8% in spotting task and a recognition rate of 97.4% from an isolated gesture.",
"fno": "252140774",
"keywords": [],
"authors": [
{
"affiliation": "Korea University",
"fullName": "Hee-Deok Yang",
"givenName": "Hee-Deok",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea University",
"fullName": "A-Yeon Park",
"givenName": "A-Yeon",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea University",
"fullName": "Seong-Whan Lee",
"givenName": "Seong-Whan",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-08-01T00:00:00",
"pubType": "proceedings",
"pages": "774-777",
"year": "2006",
"issn": "1051-4651",
"isbn": "0-7695-2521-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "252140770",
"articleId": "12OmNx76TJ6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "252140778",
"articleId": "12OmNxYbSYe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2006/2503/0/25030243",
"title": "A Full-Body Gesture Database for Automatic Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2006/25030243/12OmNvzJG1E",
"parentPublication": {
"id": "proceedings/fg/2006/2503/0",
"title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2006/2503/0/25030645",
"title": "Automatic Gesture Recognition for Intelligent Human-Robot Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2006/25030645/12OmNx0RIKl",
"parentPublication": {
"id": "proceedings/fg/2006/2503/0",
"title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/1/252111231",
"title": "Simultaneous Gesture Segmentation and Recognition based on Forward Spotting Accumulative HMMs",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252111231/12OmNy50gh9",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761681",
"title": "View-invariant full-body gesture recognition from video",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761681/12OmNyKrH5o",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kse/2011/4567/0/4567a232",
"title": "Wizard of Oz for Designing Hand Gesture Vocabulary in Human-Robot Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2011/4567a232/12OmNzDehar",
"parentPublication": {
"id": "proceedings/kse/2011/4567/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771448",
"title": "Tracking body and hands for gesture recognition: NATOPS aircraft handling signals database",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771448/12OmNzdoMDb",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2006/2503/0/25030231",
"title": "Robust Spotting of Key Gestures from Whole Body Motion Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2006/25030231/12OmNzkuKDG",
"parentPublication": {
"id": "proceedings/fg/2006/2503/0",
"title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/02/08493586",
"title": "Survey on Emotional Body Gesture Recognition",
"doi": null,
"abstractUrl": "/journal/ta/2021/02/08493586/14qdcQU04il",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798182",
"title": "Selection and Manipulation Whole-Body Gesture Elicitation Study in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798182/1cJ0GVPhN96",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798105",
"title": "Selection and Manipulation Whole-Body Gesture Elicitation Study In Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798105/1cJ0Qs2rZCg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwE9OtJ",
"title": "2009 Third IEEE International Conference on Space Mission Challenges for Information Technology (SMC-IT 2009)",
"acronym": "smc-it",
"groupId": "1002093",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNArthdB",
"doi": "10.1109/SMC-IT.2009.40",
"title": "Rapid Prototyping of Planning & Scheduling Tools",
"normalizedTitle": "Rapid Prototyping of Planning & Scheduling Tools",
"abstract": "The Advanced Planning and Scheduling Initiative, or APSI, is an ESA programme to design and implement an Artificial Intelligence (AI) software infrastructure for planning and scheduling that can generically support different types and classes of space mission operations. The goal of the APSI is twofold: (1)~creating a software framework to improve the cost-effectiveness and flexibility of mission planning support tool development; (2)~bridging the gap between AI planning and scheduling technology and the world of space mission planning. A key aspect of the success of this project is the presence of a flexible timeline representation module that allows to exploit alternatives in the modeling of mission features. This paper shows an example of such a flexibility by using a real problem in the space realm - the HERSCHEL Science Long Term Planning process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Advanced Planning and Scheduling Initiative, or APSI, is an ESA programme to design and implement an Artificial Intelligence (AI) software infrastructure for planning and scheduling that can generically support different types and classes of space mission operations. The goal of the APSI is twofold: (1)~creating a software framework to improve the cost-effectiveness and flexibility of mission planning support tool development; (2)~bridging the gap between AI planning and scheduling technology and the world of space mission planning. A key aspect of the success of this project is the presence of a flexible timeline representation module that allows to exploit alternatives in the modeling of mission features. This paper shows an example of such a flexibility by using a real problem in the space realm - the HERSCHEL Science Long Term Planning process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Advanced Planning and Scheduling Initiative, or APSI, is an ESA programme to design and implement an Artificial Intelligence (AI) software infrastructure for planning and scheduling that can generically support different types and classes of space mission operations. The goal of the APSI is twofold: (1)~creating a software framework to improve the cost-effectiveness and flexibility of mission planning support tool development; (2)~bridging the gap between AI planning and scheduling technology and the world of space mission planning. A key aspect of the success of this project is the presence of a flexible timeline representation module that allows to exploit alternatives in the modeling of mission features. This paper shows an example of such a flexibility by using a real problem in the space realm - the HERSCHEL Science Long Term Planning process.",
"fno": "3637a270",
"keywords": [
"PLANNING",
"SCHEDULING",
"SCIENCE OPERATIONS",
"RAPID PROTOTYPING"
],
"authors": [
{
"affiliation": null,
"fullName": "Amedeo Cesta",
"givenName": "Amedeo",
"surname": "Cesta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Simone Fratini",
"givenName": "Simone",
"surname": "Fratini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alessandro Donati",
"givenName": "Alessandro",
"surname": "Donati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Henrique Oliveira",
"givenName": "Henrique",
"surname": "Oliveira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nicola Policella",
"givenName": "Nicola",
"surname": "Policella",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "smc-it",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-07-01T00:00:00",
"pubType": "proceedings",
"pages": "270-277",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3637-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3637a264",
"articleId": "12OmNyYDDAC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3637a281",
"articleId": "12OmNx4yvtM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rsp/2008/3180/0/3180a017",
"title": "Functional DIF for Rapid Prototyping",
"doi": null,
"abstractUrl": "/proceedings-article/rsp/2008/3180a017/12OmNCcKQu6",
"parentPublication": {
"id": "proceedings/rsp/2008/3180/0",
"title": "2008 19th IEEE/IFIP International Symposium on Rapid System Prototyping",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rsp/1999/0246/0/02460140",
"title": "Scheduling Strategies and Estimations for Concept-Oriented Rapid Prototyping",
"doi": null,
"abstractUrl": "/proceedings-article/rsp/1999/02460140/12OmNrnJ6OC",
"parentPublication": {
"id": "proceedings/rsp/1999/0246/0",
"title": "Rapid System Prototyping, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcsw/2003/1921/0/19210042",
"title": "A Real-Time Scheduling Approach for a Web-Based Rapid Prototyping Manufacturing Platform",
"doi": null,
"abstractUrl": "/proceedings-article/icdcsw/2003/19210042/12OmNxiKrYW",
"parentPublication": {
"id": "proceedings/icdcsw/2003/1921/0",
"title": "23rd International Conference on Distributed Computing Systems Workshops, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2010/4077/2/4077c714",
"title": "Modeling and Verification for Planning and Scheduling in a Workflow Framework",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2010/4077c714/12OmNzVoBVM",
"parentPublication": {
"id": "proceedings/icicta/2010/4077/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ds/2007/05/o5001",
"title": "Guest Editor's Introduction: Rapid System Prototyping",
"doi": null,
"abstractUrl": "/magazine/ds/2007/05/o5001/13rRUxAATch",
"parentPublication": {
"id": "mags/ds",
"title": "IEEE Distributed Systems Online",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ds/2007/04/o4007",
"title": "Guest Editor's Introduction: Rapid System Prototyping",
"doi": null,
"abstractUrl": "/magazine/ds/2007/04/o4007/13rRUxBJhqs",
"parentPublication": {
"id": "mags/ds",
"title": "IEEE Distributed Systems Online",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/1991/02/k0160",
"title": "Knowledge-Based Approaches for Scheduling Problems: A Survey",
"doi": null,
"abstractUrl": "/journal/tk/1991/02/k0160/13rRUxNmPE2",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ds/2007/03/o3007",
"title": "Guest Editor's Introduction: Rapid System Prototyping",
"doi": null,
"abstractUrl": "/magazine/ds/2007/03/o3007/13rRUy3gmYQ",
"parentPublication": {
"id": "mags/ds",
"title": "IEEE Distributed Systems Online",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2003/02/x2008",
"title": "A day in an astronaut's life: reflections on advanced planning and scheduling technology",
"doi": null,
"abstractUrl": "/magazine/ex/2003/02/x2008/13rRUyuegl3",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlke/2022/9567/0/956700a110",
"title": "An AI Planning Approach to Factory Production Planning and Scheduling",
"doi": null,
"abstractUrl": "/proceedings-article/mlke/2022/956700a110/1CY804bWOR2",
"parentPublication": {
"id": "proceedings/mlke/2022/9567/0",
"title": "2022 International Conference on Machine Learning and Knowledge Engineering (MLKE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzzxuxo",
"title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCwUmBP",
"doi": "10.1109/AFGR.2000.840669",
"title": "Exploiting Speech/Gesture Co-occurrence for Improving Continuous Gesture Recognition in Weather Narration",
"normalizedTitle": "Exploiting Speech/Gesture Co-occurrence for Improving Continuous Gesture Recognition in Weather Narration",
"abstract": "In order to incorporate naturalness in the design of Human Computer Interfaces (HCI), it is desirable to develop recognition techniques capable of handling continuous natural gesture and speech inputs. Though many different researchers have reported high recognition rates for gesture recognition using Hidden Markov Models (HMMs), the gestures used are mostly pre-defined and are bound with syntactical and grammatical constraints. But natural gestures do not string together in syntactical bindings. Moreover, strict classification of natural gestures is not feasible.In this paper we have examined hand gestures made in a very natural domain, that of a weather person narrating in front of a weather map. The gestures made by the weather person are embedded in a narration. This provides us with abundant data from an uncontrolled environment to study the interaction between speech and gesture in the context of a display. We hypothesize that this domain is very similar to that of a natural human-computer interface. We present an HMMs architecture for continuous gesture recognition framework and keyword spotting. To explore the relation between gesture and speech, we conducted a statistical co-occurrence analysis of different gestures with a selected set of spoken keywords. We then demonstrate how this co-occurrence analysis can be exploited to improve the performance of continuous gesture recognition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to incorporate naturalness in the design of Human Computer Interfaces (HCI), it is desirable to develop recognition techniques capable of handling continuous natural gesture and speech inputs. Though many different researchers have reported high recognition rates for gesture recognition using Hidden Markov Models (HMMs), the gestures used are mostly pre-defined and are bound with syntactical and grammatical constraints. But natural gestures do not string together in syntactical bindings. Moreover, strict classification of natural gestures is not feasible.In this paper we have examined hand gestures made in a very natural domain, that of a weather person narrating in front of a weather map. The gestures made by the weather person are embedded in a narration. This provides us with abundant data from an uncontrolled environment to study the interaction between speech and gesture in the context of a display. We hypothesize that this domain is very similar to that of a natural human-computer interface. We present an HMMs architecture for continuous gesture recognition framework and keyword spotting. To explore the relation between gesture and speech, we conducted a statistical co-occurrence analysis of different gestures with a selected set of spoken keywords. We then demonstrate how this co-occurrence analysis can be exploited to improve the performance of continuous gesture recognition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to incorporate naturalness in the design of Human Computer Interfaces (HCI), it is desirable to develop recognition techniques capable of handling continuous natural gesture and speech inputs. Though many different researchers have reported high recognition rates for gesture recognition using Hidden Markov Models (HMMs), the gestures used are mostly pre-defined and are bound with syntactical and grammatical constraints. But natural gestures do not string together in syntactical bindings. Moreover, strict classification of natural gestures is not feasible.In this paper we have examined hand gestures made in a very natural domain, that of a weather person narrating in front of a weather map. The gestures made by the weather person are embedded in a narration. This provides us with abundant data from an uncontrolled environment to study the interaction between speech and gesture in the context of a display. We hypothesize that this domain is very similar to that of a natural human-computer interface. We present an HMMs architecture for continuous gesture recognition framework and keyword spotting. To explore the relation between gesture and speech, we conducted a statistical co-occurrence analysis of different gestures with a selected set of spoken keywords. We then demonstrate how this co-occurrence analysis can be exploited to improve the performance of continuous gesture recognition.",
"fno": "05800422",
"keywords": [
"Gesture Recognition",
"Speech Gesture Co Occurrence",
"Gesture Spotting"
],
"authors": [
{
"affiliation": "Pennsylvania State University",
"fullName": "Rajeev Sharma",
"givenName": "Rajeev",
"surname": "Sharma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pennsylvania State University",
"fullName": "Jiongyu Cai",
"givenName": "Jiongyu",
"surname": "Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pennsylvania State University",
"fullName": "Srivat Chakravarthy",
"givenName": "Srivat",
"surname": "Chakravarthy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pennsylvania State University",
"fullName": "Indrajit Poddar",
"givenName": "Indrajit",
"surname": "Poddar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pennsylvania State University",
"fullName": "Yogesh Sethi",
"givenName": "Yogesh",
"surname": "Sethi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-03-01T00:00:00",
"pubType": "proceedings",
"pages": "422",
"year": "2000",
"issn": null,
"isbn": "0-7695-0580-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05800416",
"articleId": "12OmNyUFg1e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05800428",
"articleId": "12OmNqGA5gT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBbHxO",
"doi": "10.1109/ICPR.2010.938",
"title": "A Robust Method for Hand Gesture Segmentation and Recognition Using Forward Spotting Scheme in Conditional Random Fields",
"normalizedTitle": "A Robust Method for Hand Gesture Segmentation and Recognition Using Forward Spotting Scheme in Conditional Random Fields",
"abstract": "This paper proposes a forward spotting method that handles hand gesture segmentation and recognition simultaneously without time delay. To spot meaningful gestures of numbers (0-9) accurately, a stochastic method for designing a non-gesture model using Conditional Random Fields (CRFs) is proposed without training data. The non-gesture model provides a confidence measures that are used as an adaptive threshold to find the start and the end point of meaningful gestures. Experimental results show that the proposed method can successfully recognize isolated gestures with 96.51% and meaningful gestures with 90.49% reliability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a forward spotting method that handles hand gesture segmentation and recognition simultaneously without time delay. To spot meaningful gestures of numbers (0-9) accurately, a stochastic method for designing a non-gesture model using Conditional Random Fields (CRFs) is proposed without training data. The non-gesture model provides a confidence measures that are used as an adaptive threshold to find the start and the end point of meaningful gestures. Experimental results show that the proposed method can successfully recognize isolated gestures with 96.51% and meaningful gestures with 90.49% reliability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a forward spotting method that handles hand gesture segmentation and recognition simultaneously without time delay. To spot meaningful gestures of numbers (0-9) accurately, a stochastic method for designing a non-gesture model using Conditional Random Fields (CRFs) is proposed without training data. The non-gesture model provides a confidence measures that are used as an adaptive threshold to find the start and the end point of meaningful gestures. Experimental results show that the proposed method can successfully recognize isolated gestures with 96.51% and meaningful gestures with 90.49% reliability.",
"fno": "4109d850",
"keywords": [
"Gesture Spotting",
"Gesture Recognition",
"Pattern Recognition",
"Computer Vision"
],
"authors": [
{
"affiliation": null,
"fullName": "Mahmoud Elmezain",
"givenName": "Mahmoud",
"surname": "Elmezain",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ayoub Al-Hamadi",
"givenName": "Ayoub",
"surname": "Al-Hamadi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bernd Michaelis",
"givenName": "Bernd",
"surname": "Michaelis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "3850-3853",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109d846",
"articleId": "12OmNAmVH9I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109d854",
"articleId": "12OmNx57HP1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2010/9992/0/05711749",
"title": "Robust methods for hand gesture spotting and recognition using Hidden Markov Models and Conditional Random Fields",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2010/05711749/12OmNAXPy2B",
"parentPublication": {
"id": "proceedings/isspit/2010/9992/0",
"title": "2010 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2000/0580/0/05800422",
"title": "Exploiting Speech/Gesture Co-occurrence for Improving Continuous Gesture Recognition in Weather Narration",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2000/05800422/12OmNCwUmBP",
"parentPublication": {
"id": "proceedings/fg/2000/0580/0",
"title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583013",
"title": "Activity gesture spotting using a threshold model based on Adaptive Boosting",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583013/12OmNqBtiJn",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d780",
"title": "A Framework for Hand Gesture Recognition and Spotting Using Sub-gesture Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d780/12OmNqBtj7c",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmv/2009/3944/0/3944a123",
"title": "Discriminative Models-Based Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icmv/2009/3944a123/12OmNwnYG4h",
"parentPublication": {
"id": "proceedings/icmv/2009/3944/0",
"title": "Machine Vision, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a761",
"title": "ChaLearn Looking at People RGB-D Isolated and Continuous Datasets for Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a761/12OmNzUxO9l",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/1996/7713/0/77130318",
"title": "Spotting recognition of human gestures from time-varying images",
"doi": null,
"abstractUrl": "/proceedings-article/fg/1996/77130318/12OmNzt0IHR",
"parentPublication": {
"id": "proceedings/fg/1996/7713/0",
"title": "Proceedings of the Second International Conference on Automatic Face and Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2009/07/ttp2009071264",
"title": "Sign Language Spotting with a Threshold Model Based on Conditional Random Fields",
"doi": null,
"abstractUrl": "/journal/tp/2009/07/ttp2009071264/13rRUwhHcRX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/06/ttp2011061175",
"title": "Online Gesture Spotting from Visual Hull Data",
"doi": null,
"abstractUrl": "/journal/tp/2011/06/ttp2011061175/13rRUygT7gw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873969",
"title": "Gesture Spotter: A Rapid Prototyping Tool for Key Gesture Spotting in Virtual and Augmented Reality Applications",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873969/1GjwKZEQiFa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwCJOWF",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBtiJn",
"doi": "10.1109/ICME.2010.5583013",
"title": "Activity gesture spotting using a threshold model based on Adaptive Boosting",
"normalizedTitle": "Activity gesture spotting using a threshold model based on Adaptive Boosting",
"abstract": "Gesture spotting is the task of detecting and recognizing gestures defined in a vocabulary. The difficulty of gesture spotting stems from the fact that valid gestures appear sporadically in a continuous gesture stream, interspersed with invalid gestures (movements that do not correspond to any gesture contained in the vocabulary). In this paper, a novel method for designing threshold models from valid gesture models learnt through Adaptive Boosting is proposed. This threshold model is adaptive in nature and discriminates between valid and invalid gestures. Furthermore, a gesture spotting network consisting of the individual gesture models and the threshold model is proposed to perform the task of spotting and recognition simultaneously. This technique is evaluated in the context of spotting and recognizing activity gestures (hand gestures) from continuous accelerometer data streams. The proposed technique results in a precision of 0.78 and a recall of 0.93 out performing the HMM based threshold model which resulted in 0.4 and 0.81 precision and recall values.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gesture spotting is the task of detecting and recognizing gestures defined in a vocabulary. The difficulty of gesture spotting stems from the fact that valid gestures appear sporadically in a continuous gesture stream, interspersed with invalid gestures (movements that do not correspond to any gesture contained in the vocabulary). In this paper, a novel method for designing threshold models from valid gesture models learnt through Adaptive Boosting is proposed. This threshold model is adaptive in nature and discriminates between valid and invalid gestures. Furthermore, a gesture spotting network consisting of the individual gesture models and the threshold model is proposed to perform the task of spotting and recognition simultaneously. This technique is evaluated in the context of spotting and recognizing activity gestures (hand gestures) from continuous accelerometer data streams. The proposed technique results in a precision of 0.78 and a recall of 0.93 out performing the HMM based threshold model which resulted in 0.4 and 0.81 precision and recall values.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gesture spotting is the task of detecting and recognizing gestures defined in a vocabulary. The difficulty of gesture spotting stems from the fact that valid gestures appear sporadically in a continuous gesture stream, interspersed with invalid gestures (movements that do not correspond to any gesture contained in the vocabulary). In this paper, a novel method for designing threshold models from valid gesture models learnt through Adaptive Boosting is proposed. This threshold model is adaptive in nature and discriminates between valid and invalid gestures. Furthermore, a gesture spotting network consisting of the individual gesture models and the threshold model is proposed to perform the task of spotting and recognition simultaneously. This technique is evaluated in the context of spotting and recognizing activity gestures (hand gestures) from continuous accelerometer data streams. The proposed technique results in a precision of 0.78 and a recall of 0.93 out performing the HMM based threshold model which resulted in 0.4 and 0.81 precision and recall values.",
"fno": "05583013",
"keywords": [],
"authors": [
{
"affiliation": "Center for Cognitive Ubiquitous Computing, School of Computing Informatics Decision Systems and Engineering, Arizona State University, Tempe, AZ 85281",
"fullName": "Narayanan C Krishnan",
"givenName": "Narayanan C",
"surname": "Krishnan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Cognitive Ubiquitous Computing, School of Computing Informatics Decision Systems and Engineering, Arizona State University, Tempe, AZ 85281",
"fullName": "Prasanth Lade",
"givenName": "Prasanth",
"surname": "Lade",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Cognitive Ubiquitous Computing, School of Computing Informatics Decision Systems and Engineering, Arizona State University, Tempe, AZ 85281",
"fullName": "Sethuraman Panchanathan",
"givenName": "Sethuraman",
"surname": "Panchanathan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-07-01T00:00:00",
"pubType": "proceedings",
"pages": "155-160",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-7491-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05583018",
"articleId": "12OmNwDSdgD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05583022",
"articleId": "12OmNwfsI21",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2010/9992/0/05711749",
"title": "Robust methods for hand gesture spotting and recognition using Hidden Markov Models and Conditional Random Fields",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2010/05711749/12OmNAXPy2B",
"parentPublication": {
"id": "proceedings/isspit/2010/9992/0",
"title": "2010 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/4/252140774",
"title": "Human-Robot Interaction by Whole Body Gesture Spotting and Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252140774/12OmNAqU4UU",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d850",
"title": "A Robust Method for Hand Gesture Segmentation and Recognition Using Forward Spotting Scheme in Conditional Random Fields",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d850/12OmNqBbHxO",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d780",
"title": "A Framework for Hand Gesture Recognition and Spotting Using Sub-gesture Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d780/12OmNqBtj7c",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/1/252111231",
"title": "Simultaneous Gesture Segmentation and Recognition based on Forward Spotting Accumulative HMMs",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252111231/12OmNy50gh9",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2006/2503/0/25030231",
"title": "Robust Spotting of Key Gestures from Whole Body Motion Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2006/25030231/12OmNzkuKDG",
"parentPublication": {
"id": "proceedings/fg/2006/2503/0",
"title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/1996/7713/0/77130318",
"title": "Spotting recognition of human gestures from time-varying images",
"doi": null,
"abstractUrl": "/proceedings-article/fg/1996/77130318/12OmNzt0IHR",
"parentPublication": {
"id": "proceedings/fg/1996/7713/0",
"title": "Proceedings of the Second International Conference on Automatic Face and Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1999/10/i0961",
"title": "An HMM-Based Threshold Model Approach for Gesture Recognition",
"doi": null,
"abstractUrl": "/journal/tp/1999/10/i0961/13rRUxly9eW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/06/ttp2011061175",
"title": "Online Gesture Spotting from Visual Hull Data",
"doi": null,
"abstractUrl": "/journal/tp/2011/06/ttp2011061175/13rRUygT7gw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873969",
"title": "Gesture Spotter: A Rapid Prototyping Tool for Key Gesture Spotting in Virtual and Augmented Reality Applications",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873969/1GjwKZEQiFa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBtj7c",
"doi": "10.1109/ICPR.2010.921",
"title": "A Framework for Hand Gesture Recognition and Spotting Using Sub-gesture Modeling",
"normalizedTitle": "A Framework for Hand Gesture Recognition and Spotting Using Sub-gesture Modeling",
"abstract": "Hand gesture interpretation is an open research problem in Human Computer Interaction (HCI), which involves locating gesture boundaries (Gesture Spotting) in a continuous video sequence and recognizing the gesture. Existing techniques model each gesture as a temporal sequence of visual features extracted from individual frames which is not efficient due to the large variability of frames at different timestamps. In this paper, we propose a new sub-gesture modeling approach which represents each gesture as a sequence of fixed sub-gestures (a group of consecutive frames with locally coherent context) and provides a robust modeling of the visual features. We further extend this approach to the task of gesture spotting where the gesture boundaries are identified using a filler model and gesture completion model. Experimental results show that the proposed method outperforms state-of-the-art Hidden Conditional Random Fields (HCRF) based methods and baseline gesture spotting techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hand gesture interpretation is an open research problem in Human Computer Interaction (HCI), which involves locating gesture boundaries (Gesture Spotting) in a continuous video sequence and recognizing the gesture. Existing techniques model each gesture as a temporal sequence of visual features extracted from individual frames which is not efficient due to the large variability of frames at different timestamps. In this paper, we propose a new sub-gesture modeling approach which represents each gesture as a sequence of fixed sub-gestures (a group of consecutive frames with locally coherent context) and provides a robust modeling of the visual features. We further extend this approach to the task of gesture spotting where the gesture boundaries are identified using a filler model and gesture completion model. Experimental results show that the proposed method outperforms state-of-the-art Hidden Conditional Random Fields (HCRF) based methods and baseline gesture spotting techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hand gesture interpretation is an open research problem in Human Computer Interaction (HCI), which involves locating gesture boundaries (Gesture Spotting) in a continuous video sequence and recognizing the gesture. Existing techniques model each gesture as a temporal sequence of visual features extracted from individual frames which is not efficient due to the large variability of frames at different timestamps. In this paper, we propose a new sub-gesture modeling approach which represents each gesture as a sequence of fixed sub-gestures (a group of consecutive frames with locally coherent context) and provides a robust modeling of the visual features. We further extend this approach to the task of gesture spotting where the gesture boundaries are identified using a filler model and gesture completion model. Experimental results show that the proposed method outperforms state-of-the-art Hidden Conditional Random Fields (HCRF) based methods and baseline gesture spotting techniques.",
"fno": "4109d780",
"keywords": [
"Hand Gesture Recognition",
"Human Computer Interaction",
"Sub Gesture Modeling"
],
"authors": [
{
"affiliation": null,
"fullName": "Manavender R. Malgireddy",
"givenName": "Manavender R.",
"surname": "Malgireddy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jason J. Corso",
"givenName": "Jason J.",
"surname": "Corso",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Srirangaraj Setlur",
"givenName": "Srirangaraj",
"surname": "Setlur",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Venu Govindaraju",
"givenName": "Venu",
"surname": "Govindaraju",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dinesh Mandalapu",
"givenName": "Dinesh",
"surname": "Mandalapu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "3780-3783",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109d776",
"articleId": "12OmNrJiCY2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109d784",
"articleId": "12OmNAoUTsF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2010/4109/0/4109d850",
"title": "A Robust Method for Hand Gesture Segmentation and Recognition Using Forward Spotting Scheme in Conditional Random Fields",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d850/12OmNqBbHxO",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2010/4147/0/4147a240",
"title": "Movement Tracking in Real-Time Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2010/4147a240/12OmNvvtGYC",
"parentPublication": {
"id": "proceedings/icis/2010/4147/0",
"title": "Computer and Information Science, ACIS International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2010/4062/0/4062a237",
"title": "A Real Time Vision-Based Hand Gesture Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2010/4062a237/12OmNxE2mHJ",
"parentPublication": {
"id": "proceedings/ams/2010/4062/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2010/4246/0/4246a037",
"title": "Hand Gesture Recognition Using CAMSHIFT Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2010/4246a037/12OmNy2Jt9f",
"parentPublication": {
"id": "proceedings/icetet/2010/4246/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2008/3279/0/3279a096",
"title": "Optimal Consensus Intuitive Hand Gesture Vocabulary Design",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2008/3279a096/12OmNyUWQSc",
"parentPublication": {
"id": "proceedings/icsc/2008/3279/0",
"title": "2008 IEEE International Conference on Semantic Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/socpar/2009/3879/0/3879a592",
"title": "A Simple Wearable Hand Gesture Recognition Device Using iMEMS",
"doi": null,
"abstractUrl": "/proceedings-article/socpar/2009/3879a592/12OmNykCccE",
"parentPublication": {
"id": "proceedings/socpar/2009/3879/0",
"title": "Soft Computing and Pattern Recognition, International Conference of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2010/6846/0/05444702",
"title": "Robust vision-based hand tracking using single camera for ubiquitous 3D gesture interaction",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444702/12OmNzCWG2m",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kse/2011/4567/0/4567a232",
"title": "Wizard of Oz for Designing Hand Gesture Vocabulary in Human-Robot Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2011/4567a232/12OmNzDehar",
"parentPublication": {
"id": "proceedings/kse/2011/4567/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwcse/2009/3881/2/3881b072",
"title": "An Automatic Hand Gesture Recognition System Based on Viola-Jones Method and SVMs",
"doi": null,
"abstractUrl": "/proceedings-article/iwcse/2009/3881b072/12OmNzZ5oge",
"parentPublication": {
"id": "proceedings/iwcse/2009/3881/2",
"title": "Computer Science and Engineering, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/06/ttp2011061175",
"title": "Online Gesture Spotting from Visual Hull Data",
"doi": null,
"abstractUrl": "/journal/tp/2011/06/ttp2011061175/13rRUygT7gw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1ehBy9p57Q4",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"acronym": "ithings-greencom-cpscom-smartdata",
"groupId": "1800308",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1ehBChsoQyk",
"doi": "10.1109/iThings/GreenCom/CPSCom/SmartData.2019.00174",
"title": "Sensor Based Dynamic Hand Gesture Recognition by PairNet",
"normalizedTitle": "Sensor Based Dynamic Hand Gesture Recognition by PairNet",
"abstract": "This paper presents a novel feedforward neural network for sensor-based dynamic hand gesture recognition. The algorithm, termed PairNet, is capable of carrying out accurate gesture spotting for the sensory data produced by basic accelerators and gyroscopes, which are commonly deployed in internet of things devices. The gesture classification outcomes are then obtained from the spotting results by the Maximum A Posteriori (MAP) estimation. To illustrate the effectiveness of the proposed algorithm, a prototype system based on a mobile phone has been implemented. Experimental results reveal that, while attaining realtime operations, the proposed algorithm has superior accuracy over existing sensor-based counterparts for hand gesture recognition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a novel feedforward neural network for sensor-based dynamic hand gesture recognition. The algorithm, termed PairNet, is capable of carrying out accurate gesture spotting for the sensory data produced by basic accelerators and gyroscopes, which are commonly deployed in internet of things devices. The gesture classification outcomes are then obtained from the spotting results by the Maximum A Posteriori (MAP) estimation. To illustrate the effectiveness of the proposed algorithm, a prototype system based on a mobile phone has been implemented. Experimental results reveal that, while attaining realtime operations, the proposed algorithm has superior accuracy over existing sensor-based counterparts for hand gesture recognition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a novel feedforward neural network for sensor-based dynamic hand gesture recognition. The algorithm, termed PairNet, is capable of carrying out accurate gesture spotting for the sensory data produced by basic accelerators and gyroscopes, which are commonly deployed in internet of things devices. The gesture classification outcomes are then obtained from the spotting results by the Maximum A Posteriori (MAP) estimation. To illustrate the effectiveness of the proposed algorithm, a prototype system based on a mobile phone has been implemented. Experimental results reveal that, while attaining realtime operations, the proposed algorithm has superior accuracy over existing sensor-based counterparts for hand gesture recognition.",
"fno": "298000a994",
"keywords": [
"Feedforward Neural Nets",
"Gesture Recognition",
"Maximum Likelihood Estimation",
"Feedforward Neural Network",
"Sensor Based Dynamic Hand Gesture Recognition",
"Gesture Spotting",
"Gesture Classification Outcomes",
"Sensor Based Counterparts",
"Maximum A Posteriori Estimation",
"MAP Estimation",
"Pair Net",
"Sensory Data",
"Basic Accelerators",
"Gyroscopes",
"Gesture Recognition",
"Convolution",
"Heuristic Algorithms",
"Kernel",
"Internet Of Things",
"Gyroscopes",
"Estimation",
"Continuous Hand Gesture Recognition",
"Human Machine Interface",
"Convolutional Neural Networks"
],
"authors": [
{
"affiliation": "National Taiwan Normal University",
"fullName": "Yun-Jie Jhang",
"givenName": "Yun-Jie",
"surname": "Jhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan Normal University",
"fullName": "Yen-Cheng Chu",
"givenName": "Yen-Cheng",
"surname": "Chu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "nVidia Corp",
"fullName": "Tsung-Ming Tai",
"givenName": "Tsung-Ming",
"surname": "Tai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan Normal University",
"fullName": "Wen-Jyi Hwang",
"givenName": "Wen-Jyi",
"surname": "Hwang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan Normal University",
"fullName": "Po-Wen Cheng",
"givenName": "Po-Wen",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "nVidia Corp",
"fullName": "Cheng-Kuang Lee",
"givenName": "Cheng-Kuang",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ithings-greencom-cpscom-smartdata",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "994-1001",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-2980-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "298000a986",
"articleId": "1ehBCYiYGU8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "298000b002",
"articleId": "1ehBEH4ZAsM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2016/1437/0/1437b206",
"title": "Skeleton-Based Dynamic Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437b206/12OmNCdBDX2",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034d056",
"title": "Continuous Gesture Recognition with Hand-Oriented Spatiotemporal Feature",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034d056/12OmNCeaPUg",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d850",
"title": "A Robust Method for Hand Gesture Segmentation and Recognition Using Forward Spotting Scheme in Conditional Random Fields",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d850/12OmNqBbHxO",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d780",
"title": "A Framework for Hand Gesture Recognition and Spotting Using Sub-gesture Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d780/12OmNqBtj7c",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2016/4871/0/07916962",
"title": "Integration of Hand Gesture and Multi Touch Gesture with Glove Type Device",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2016/07916962/12OmNrGsDoo",
"parentPublication": {
"id": "proceedings/acit-csi/2016/4871/0",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2011/0774/0/05959595",
"title": "Evaluating Gesture Recognition by Multiple-Sensor-Containing Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2011/05959595/12OmNvT2pb4",
"parentPublication": {
"id": "proceedings/iswc/2011/0774/0",
"title": "2011 15th Annual International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890302",
"title": "A windowed dynamic time warping approach for 3D continuous hand gesture recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890302/12OmNy6Zs40",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607524",
"title": "Image-to-Class Dynamic Time Warping for 3D hand gesture recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607524/12OmNyTwRjD",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2019/1337/0/133700a295",
"title": "Hand Gesture Recognition with Convolution Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2019/133700a295/1dUndUhVcs0",
"parentPublication": {
"id": "proceedings/iri/2019/1337/0",
"title": "2019 IEEE 20th International Conference on Information Reuse and Integration for Data Science (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a623",
"title": "A Transformer-Based Network for Dynamic Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a623/1qyxkizQDn2",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyFCvPo",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBEGYGX",
"doi": "10.1109/ICCV.2013.161",
"title": "Estimating the 3D Layout of Indoor Scenes and Its Clutter from Depth Sensors",
"normalizedTitle": "Estimating the 3D Layout of Indoor Scenes and Its Clutter from Depth Sensors",
"abstract": "In this paper we propose an approach to jointly estimate the layout of rooms as well as the clutter present in the scene using RGB-D data. Towards this goal, we propose an effective model that is able to exploit both depth and appearance features, which are complementary. Furthermore, our approach is efficient as we exploit the inherent decomposition of additive potentials. We demonstrate the effectiveness of our approach on the challenging NYU v2 dataset and show that employing depth reduces the layout error by 6% and the clutter estimation by 13%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we propose an approach to jointly estimate the layout of rooms as well as the clutter present in the scene using RGB-D data. Towards this goal, we propose an effective model that is able to exploit both depth and appearance features, which are complementary. Furthermore, our approach is efficient as we exploit the inherent decomposition of additive potentials. We demonstrate the effectiveness of our approach on the challenging NYU v2 dataset and show that employing depth reduces the layout error by 6% and the clutter estimation by 13%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we propose an approach to jointly estimate the layout of rooms as well as the clutter present in the scene using RGB-D data. Towards this goal, we propose an effective model that is able to exploit both depth and appearance features, which are complementary. Furthermore, our approach is efficient as we exploit the inherent decomposition of additive potentials. We demonstrate the effectiveness of our approach on the challenging NYU v2 dataset and show that employing depth reduces the layout error by 6% and the clutter estimation by 13%.",
"fno": "2840b273",
"keywords": [
"Layout",
"Clutter",
"Labeling",
"Geometry",
"Three Dimensional Displays",
"Estimation",
"Semantics"
],
"authors": [
{
"affiliation": null,
"fullName": "Jian Zhang",
"givenName": "Jian",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chen Kan",
"givenName": "Chen",
"surname": "Kan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alexander G. Schwing",
"givenName": "Alexander G.",
"surname": "Schwing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Raquel Urtasun",
"givenName": "Raquel",
"surname": "Urtasun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "1273-1280",
"year": "2013",
"issn": "1550-5499",
"isbn": "978-1-4799-2840-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2840b265",
"articleId": "12OmNBpVQbW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2840b281",
"articleId": "12OmNxGALga",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391a936",
"title": "Learning Informative Edge Maps for Indoor Scene Layout Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a936/12OmNvSKNTq",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851a616",
"title": "DeLay: Robust Spatial Layout Estimation for Cluttered Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851a616/12OmNwfb6SI",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977445",
"title": "Estimating Floor Regions in Cluttered Indoor Scenes from First Person Camera View",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977445/12OmNy50gfd",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156379",
"title": "Clutter-aware label layout",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156379/12OmNyY4rqE",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a870",
"title": "Physics Inspired Optimization on Semantic Transfer Features: An Alternative Method for Room Layout Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a870/12OmNyv7mc0",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840c144",
"title": "Support Surface Prediction in Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840c144/12OmNzRqdJl",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07299091",
"title": "Separating objects and clutter in indoor scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07299091/12OmNzaQotT",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09878246",
"title": "Instant Automatic Emptying of Panoramic Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09878246/1GrP72KEfFS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600b644",
"title": "LGT-Net: Indoor Panoramic Room Layout Estimation with Geometry-Aware Transformer Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600b644/1H0O6jOOG6Q",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2020/9325/0/09232526",
"title": "LayART: Generating indoor layout using ARCore Transformations",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2020/09232526/1o56y3bZgwo",
"parentPublication": {
"id": "proceedings/bigmm/2020/9325/0",
"title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBkfRhw",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwF0BQQ",
"doi": "10.1109/CVPR.2015.7299195",
"title": "Robust reconstruction of indoor scenes",
"normalizedTitle": "Robust reconstruction of indoor scenes",
"abstract": "We present an approach to indoor scene reconstruction from RGB-D video. The key idea is to combine geometric registration of scene fragments with robust global optimization based on line processes. Geometric registration is error-prone due to sensor noise, which leads to aliasing of geometric detail and inability to disambiguate different surfaces in the scene. The presented optimization approach disables erroneous geometric alignments even when they significantly outnumber correct ones. Experimental results demonstrate that the presented approach substantially increases the accuracy of reconstructed scene models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an approach to indoor scene reconstruction from RGB-D video. The key idea is to combine geometric registration of scene fragments with robust global optimization based on line processes. Geometric registration is error-prone due to sensor noise, which leads to aliasing of geometric detail and inability to disambiguate different surfaces in the scene. The presented optimization approach disables erroneous geometric alignments even when they significantly outnumber correct ones. Experimental results demonstrate that the presented approach substantially increases the accuracy of reconstructed scene models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an approach to indoor scene reconstruction from RGB-D video. The key idea is to combine geometric registration of scene fragments with robust global optimization based on line processes. Geometric registration is error-prone due to sensor noise, which leads to aliasing of geometric detail and inability to disambiguate different surfaces in the scene. The presented optimization approach disables erroneous geometric alignments even when they significantly outnumber correct ones. Experimental results demonstrate that the presented approach substantially increases the accuracy of reconstructed scene models.",
"fno": "07299195",
"keywords": [],
"authors": [
{
"affiliation": "Stanford University, USA",
"fullName": "Sungjoon Choi",
"givenName": null,
"surname": "Sungjoon Choi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intel Labs, USA",
"fullName": "Qian-Yi Zhou",
"givenName": "Qian-Yi",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intel Labs, USA",
"fullName": "Vladlen Koltun",
"givenName": "Vladlen",
"surname": "Koltun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-06-01T00:00:00",
"pubType": "proceedings",
"pages": "5556-5565",
"year": "2015",
"issn": "1063-6919",
"isbn": "978-1-4673-6964-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07299194",
"articleId": "12OmNBa2iDA",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2012/1226/0/355P3A44",
"title": "Recovering free space of indoor scenes from a single image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/355P3A44/12OmNBKEynJ",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1999/0062/0/00620348",
"title": "Indoor Scene Reconstruction from Sets of Noisy Range Images",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1999/00620348/12OmNrkT7Nk",
"parentPublication": {
"id": "proceedings/3dim/1999/0062/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06836125",
"title": "Detecting 3D geometric boundaries of indoor scenes under varying lighting",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06836125/12OmNvDI3WA",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989a033",
"title": "Understanding Indoor Scenes Using 3D Geometric Phrases",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989a033/12OmNwJybOy",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989c067",
"title": "Mesh Based Semantic Modelling for Indoor and Outdoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989c067/12OmNwekjC2",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a616",
"title": "Multi-planar Monocular Reconstruction of Manhattan Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a616/17D45XvMcbo",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09878246",
"title": "Instant Automatic Emptying of Panoramic Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09878246/1GrP72KEfFS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a964",
"title": "Online Reconstruction of Indoor Scenes With Local Manhattan Frame Growing",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a964/1iTvpwOaZ68",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09254163",
"title": "ManhattanFusion: Online Dense Reconstruction of Indoor Scenes From Depth Sequences",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09254163/1oDXGr1kjYs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/2021/03/09468410",
"title": "Reconstruction for Indoor Scenes Based on an Interpretable Inference",
"doi": null,
"abstractUrl": "/journal/ai/2021/03/09468410/1uPuPi2ZzYQ",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNz2TCuR",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwJybOy",
"doi": "10.1109/CVPR.2013.12",
"title": "Understanding Indoor Scenes Using 3D Geometric Phrases",
"normalizedTitle": "Understanding Indoor Scenes Using 3D Geometric Phrases",
"abstract": "Visual scene understanding is a difficult problem interleaving object detection, geometric reasoning and scene classification. We present a hierarchical scene model for learning and reasoning about complex indoor scenes which is computationally tractable, can be learned from a reasonable amount of training data, and avoids oversimplification. At the core of this approach is the 3D Geometric Phrase Model which captures the semantic and geometric relationships between objects which frequently co-occur in the same 3D spatial configuration. Experiments show that this model effectively explains scene semantics, geometry and object groupings from a single image, while also improving individual object detections.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual scene understanding is a difficult problem interleaving object detection, geometric reasoning and scene classification. We present a hierarchical scene model for learning and reasoning about complex indoor scenes which is computationally tractable, can be learned from a reasonable amount of training data, and avoids oversimplification. At the core of this approach is the 3D Geometric Phrase Model which captures the semantic and geometric relationships between objects which frequently co-occur in the same 3D spatial configuration. Experiments show that this model effectively explains scene semantics, geometry and object groupings from a single image, while also improving individual object detections.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual scene understanding is a difficult problem interleaving object detection, geometric reasoning and scene classification. We present a hierarchical scene model for learning and reasoning about complex indoor scenes which is computationally tractable, can be learned from a reasonable amount of training data, and avoids oversimplification. At the core of this approach is the 3D Geometric Phrase Model which captures the semantic and geometric relationships between objects which frequently co-occur in the same 3D spatial configuration. Experiments show that this model effectively explains scene semantics, geometry and object groupings from a single image, while also improving individual object detections.",
"fno": "4989a033",
"keywords": [],
"authors": [
{
"affiliation": null,
"fullName": "Wongun Choi",
"givenName": "Wongun",
"surname": "Choi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu-Wei Chao",
"givenName": "Yu-Wei",
"surname": "Chao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Caroline Pantofaru",
"givenName": "Caroline",
"surname": "Pantofaru",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Silvio Savarese",
"givenName": "Silvio",
"surname": "Savarese",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "33-40",
"year": "2013",
"issn": "1063-6919",
"isbn": "978-0-7695-4989-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4989a025",
"articleId": "12OmNBzAcm8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4989a041",
"articleId": "12OmNvStcOR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130294",
"title": "Revisiting 3D geometric models for accurate object shape and pose",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130294/12OmNAOsMKB",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/355P3A44",
"title": "Recovering free space of indoor scenes from a single image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/355P3A44/12OmNBKEynJ",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/344P3A33",
"title": "Bayesian geometric modeling of indoor scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/344P3A33/12OmNBt3qj0",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06836125",
"title": "Detecting 3D geometric boundaries of indoor scenes under varying lighting",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06836125/12OmNvDI3WA",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459211",
"title": "Decomposing a scene into geometric and semantically consistent regions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459211/12OmNwD1q5T",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07299195",
"title": "Robust reconstruction of indoor scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07299195/12OmNwF0BQQ",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989c067",
"title": "Mesh Based Semantic Modelling for Indoor and Outdoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989c067/12OmNwekjC2",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07299091",
"title": "Separating objects and clutter in indoor scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07299091/12OmNzaQotT",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/04/ttp2013040882",
"title": "Monocular Visual Scene Understanding: Understanding Multi-Object Traffic Scenes",
"doi": null,
"abstractUrl": "/journal/tp/2013/04/ttp2013040882/13rRUwdrdLY",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10115261",
"title": "Leveraging Commonsense for Object Localisation in Partial Scenes",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10115261/1MQvaQmp9Ac",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqH9hnp",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwfb6SI",
"doi": "10.1109/CVPR.2016.73",
"title": "DeLay: Robust Spatial Layout Estimation for Cluttered Indoor Scenes",
"normalizedTitle": "DeLay: Robust Spatial Layout Estimation for Cluttered Indoor Scenes",
"abstract": "We consider the problem of estimating the spatial layout of an indoor scene from a monocular RGB image, modeled as the projection of a 3D cuboid. Existing solutions to this problem often rely strongly on hand-engineered features and vanishing point detection, which are prone to failure in the presence of clutter. In this paper, we present a method that uses a fully convolutional neural network (FCNN) in conjunction with a novel optimization framework for generating layout estimates. We demonstrate that our method is robust in the presence of clutter and handles a wide range of highly challenging scenes. We evaluate our method on two standard benchmarks and show that it achieves state of the art results, outperforming previous methods by a wide margin.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We consider the problem of estimating the spatial layout of an indoor scene from a monocular RGB image, modeled as the projection of a 3D cuboid. Existing solutions to this problem often rely strongly on hand-engineered features and vanishing point detection, which are prone to failure in the presence of clutter. In this paper, we present a method that uses a fully convolutional neural network (FCNN) in conjunction with a novel optimization framework for generating layout estimates. We demonstrate that our method is robust in the presence of clutter and handles a wide range of highly challenging scenes. We evaluate our method on two standard benchmarks and show that it achieves state of the art results, outperforming previous methods by a wide margin.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We consider the problem of estimating the spatial layout of an indoor scene from a monocular RGB image, modeled as the projection of a 3D cuboid. Existing solutions to this problem often rely strongly on hand-engineered features and vanishing point detection, which are prone to failure in the presence of clutter. In this paper, we present a method that uses a fully convolutional neural network (FCNN) in conjunction with a novel optimization framework for generating layout estimates. We demonstrate that our method is robust in the presence of clutter and handles a wide range of highly challenging scenes. We evaluate our method on two standard benchmarks and show that it achieves state of the art results, outperforming previous methods by a wide margin.",
"fno": "8851a616",
"keywords": [
"Layout",
"Clutter",
"Semantics",
"Estimation",
"Neural Networks",
"Robustness",
"Pipelines"
],
"authors": [
{
"affiliation": null,
"fullName": "Saumitro Dasgupta",
"givenName": "Saumitro",
"surname": "Dasgupta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kuan Fang",
"givenName": "Kuan",
"surname": "Fang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kevin Chen",
"givenName": "Kevin",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Silvio Savarese",
"givenName": "Silvio",
"surname": "Savarese",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "616-624",
"year": "2016",
"issn": "1063-6919",
"isbn": "978-1-4673-8851-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8851a607",
"articleId": "12OmNyKa66d",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8851a625",
"articleId": "12OmNA0dMIX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2013/2840/0/2840b273",
"title": "Estimating the 3D Layout of Indoor Scenes and Its Clutter from Depth Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b273/12OmNBEGYGX",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459411",
"title": "Recovering the spatial layout of cluttered rooms",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459411/12OmNwEJ0PD",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977445",
"title": "Estimating Floor Regions in Cluttered Indoor Scenes from First Person Camera View",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977445/12OmNy50gfd",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156379",
"title": "Clutter-aware label layout",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156379/12OmNyY4rqE",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a870",
"title": "Physics Inspired Optimization on Semantic Transfer Features: An Alternative Method for Room Layout Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a870/12OmNyv7mc0",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989d065",
"title": "Manhattan Junction Catalogue for Spatial Reasoning of Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989d065/12OmNznkK2z",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1999/05/i0433",
"title": "Using Spin Images for Efficient Object Recognition in Cluttered 3D Scenes",
"doi": null,
"abstractUrl": "/journal/tp/1999/05/i0433/13rRUx0gevS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546278",
"title": "Indoor Scene Layout Estimation from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546278/17D45XvMcb4",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09878246",
"title": "Instant Automatic Emptying of Panoramic Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09878246/1GrP72KEfFS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c133",
"title": "Zillow Indoor Dataset: Annotated Floor Plans With 360° Panoramas and 3D Room Layouts",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c133/1yeKk438NjO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrNh0vw",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy50gfd",
"doi": "10.1109/ICPR.2014.733",
"title": "Estimating Floor Regions in Cluttered Indoor Scenes from First Person Camera View",
"normalizedTitle": "Estimating Floor Regions in Cluttered Indoor Scenes from First Person Camera View",
"abstract": "The ability to detect floor regions from an image enables a variety of applications such as indoor scene understanding, mobility assessment, robot navigation, path planning and surveillance. In this work, we propose a framework for estimating floor regions in cluttered indoor environments. The problem of floor detection and segmentation is challenging in situations where floor and non-floor regions have similar appearances. It is even harder to segment floor regions when clutter, specular reflections, shadows and textured floors are present within the scene. Our framework utilizes a generic classifier trained from appearance cues as well as floor density estimates, both trained from a variety of indoor images. The results of the classifier is then adapted to a specific test image where we integrate appearance, position and geometric cues in an iterative framework. A Markov Random Field framework is used to integrate the cues to segment floor regions. In contrast to previous settings that relied on optical flow, depth sensors or multiple images in a calibrated setup, our method can work on a single image. It is also more flexible as we avoid assumptions like Manhattan world scene or restricting clutter only to wall-floor boundaries. Experimental results on the public MIT Scene dataset as well as a more challenging dataset that we acquired, demonstrate the robustness and efficiency of our framework on the above mentioned complex situations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The ability to detect floor regions from an image enables a variety of applications such as indoor scene understanding, mobility assessment, robot navigation, path planning and surveillance. In this work, we propose a framework for estimating floor regions in cluttered indoor environments. The problem of floor detection and segmentation is challenging in situations where floor and non-floor regions have similar appearances. It is even harder to segment floor regions when clutter, specular reflections, shadows and textured floors are present within the scene. Our framework utilizes a generic classifier trained from appearance cues as well as floor density estimates, both trained from a variety of indoor images. The results of the classifier is then adapted to a specific test image where we integrate appearance, position and geometric cues in an iterative framework. A Markov Random Field framework is used to integrate the cues to segment floor regions. In contrast to previous settings that relied on optical flow, depth sensors or multiple images in a calibrated setup, our method can work on a single image. It is also more flexible as we avoid assumptions like Manhattan world scene or restricting clutter only to wall-floor boundaries. Experimental results on the public MIT Scene dataset as well as a more challenging dataset that we acquired, demonstrate the robustness and efficiency of our framework on the above mentioned complex situations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The ability to detect floor regions from an image enables a variety of applications such as indoor scene understanding, mobility assessment, robot navigation, path planning and surveillance. In this work, we propose a framework for estimating floor regions in cluttered indoor environments. The problem of floor detection and segmentation is challenging in situations where floor and non-floor regions have similar appearances. It is even harder to segment floor regions when clutter, specular reflections, shadows and textured floors are present within the scene. Our framework utilizes a generic classifier trained from appearance cues as well as floor density estimates, both trained from a variety of indoor images. The results of the classifier is then adapted to a specific test image where we integrate appearance, position and geometric cues in an iterative framework. A Markov Random Field framework is used to integrate the cues to segment floor regions. In contrast to previous settings that relied on optical flow, depth sensors or multiple images in a calibrated setup, our method can work on a single image. It is also more flexible as we avoid assumptions like Manhattan world scene or restricting clutter only to wall-floor boundaries. Experimental results on the public MIT Scene dataset as well as a more challenging dataset that we acquired, demonstrate the robustness and efficiency of our framework on the above mentioned complex situations.",
"fno": "06977445",
"keywords": [
"Accuracy",
"Clutter",
"Support Vector Machines",
"Image Segmentation",
"Estimation",
"Cameras",
"Floors",
"Cluttered Indoor Scenes",
"Scene Understanding",
"Floor Segmentation"
],
"authors": [
{
"affiliation": null,
"fullName": "Sanchit Aggarwal",
"givenName": "Sanchit",
"surname": "Aggarwal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anoop M. Namboodiri",
"givenName": "Anoop M.",
"surname": "Namboodiri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C.V. Jawahar",
"givenName": "C.V.",
"surname": "Jawahar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "4275-4280",
"year": "2014",
"issn": "1051-4651",
"isbn": "978-1-4799-5209-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06977444",
"articleId": "12OmNB0Fxhh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06977446",
"articleId": "12OmNwDACvO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/1990/2057/0/00139596",
"title": "Region-based reconstruction of an indoor scene using an integration of active and passive sensing techniques",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1990/00139596/12OmNASraZB",
"parentPublication": {
"id": "proceedings/iccv/1990/2057/0",
"title": "Proceedings Third International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/1/71080264",
"title": "Detecting driveable floor regions",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71080264/12OmNAolGRd",
"parentPublication": {
"id": "proceedings/iros/1995/7108/1",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2013/5075/0/06529447",
"title": "Adaptive context-agnostic floor transition detection on smart mobile devices",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2013/06529447/12OmNBlofOa",
"parentPublication": {
"id": "proceedings/percomw/2013/5075/0",
"title": "2013 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a546",
"title": "Behind the Scenes: What Moving Targets Reveal about Static Scene Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a546/12OmNC0PGLN",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2015/9101/0/9101a416",
"title": "BarFi: Barometer-Aided Wi-Fi Floor Localization Using Crowdsourcing",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2015/9101a416/12OmNCdBDF7",
"parentPublication": {
"id": "proceedings/mass/2015/9101/0",
"title": "2015 IEEE 12th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a149",
"title": "An Indoor Scene Recognition Algorithm Based on Pressure Change Pattern",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a149/12OmNqGA52F",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2000/0662/1/06621804",
"title": "Detecting People in Cluttered Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2000/06621804/12OmNrAMF3z",
"parentPublication": {
"id": "proceedings/cvpr/2000/0662/1",
"title": "Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No.PR00662)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851a616",
"title": "DeLay: Robust Spatial Layout Estimation for Cluttered Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851a616/12OmNwfb6SI",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209d505",
"title": "Pose Invariant Activity Classification for Multi-floor Indoor Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209d505/12OmNy2rRTy",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047367",
"title": "Parsing Indoor Scenes from RGB-D Image Using Superpixel and Region Merging",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047367/1iC6D9QWozm",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBkfRhw",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzaQotT",
"doi": "10.1109/CVPR.2015.7299091",
"title": "Separating objects and clutter in indoor scenes",
"normalizedTitle": "Separating objects and clutter in indoor scenes",
"abstract": "Objects' spatial layout estimation and clutter identification are two important tasks to understand indoor scenes. We propose to solve both of these problems in a joint framework using RGBD images of indoor scenes. In contrast to recent approaches which focus on either one of these two problems, we perform ‘fine grained structure categorization’ by predicting all the major objects and simultaneously labeling the cluttered regions. A conditional random field model is proposed to incorporate a rich set of local appearance, geometric features and interactions between the scene elements. We take a structural learning approach with a loss of 3D localisation to estimate the model parameters from a large annotated RGBD dataset, and a mixed integer linear programming formulation for inference. We demonstrate that our approach is able to detect cuboids and estimate cluttered regions across many different object and scene categories in the presence of occlusion, illumination and appearance variations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Objects' spatial layout estimation and clutter identification are two important tasks to understand indoor scenes. We propose to solve both of these problems in a joint framework using RGBD images of indoor scenes. In contrast to recent approaches which focus on either one of these two problems, we perform ‘fine grained structure categorization’ by predicting all the major objects and simultaneously labeling the cluttered regions. A conditional random field model is proposed to incorporate a rich set of local appearance, geometric features and interactions between the scene elements. We take a structural learning approach with a loss of 3D localisation to estimate the model parameters from a large annotated RGBD dataset, and a mixed integer linear programming formulation for inference. We demonstrate that our approach is able to detect cuboids and estimate cluttered regions across many different object and scene categories in the presence of occlusion, illumination and appearance variations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Objects' spatial layout estimation and clutter identification are two important tasks to understand indoor scenes. We propose to solve both of these problems in a joint framework using RGBD images of indoor scenes. In contrast to recent approaches which focus on either one of these two problems, we perform ‘fine grained structure categorization’ by predicting all the major objects and simultaneously labeling the cluttered regions. A conditional random field model is proposed to incorporate a rich set of local appearance, geometric features and interactions between the scene elements. We take a structural learning approach with a loss of 3D localisation to estimate the model parameters from a large annotated RGBD dataset, and a mixed integer linear programming formulation for inference. We demonstrate that our approach is able to detect cuboids and estimate cluttered regions across many different object and scene categories in the presence of occlusion, illumination and appearance variations.",
"fno": "07299091",
"keywords": [],
"authors": [
{
"affiliation": "School of CSSE UWA, Australia",
"fullName": "S. H. Khan",
"givenName": "S. H.",
"surname": "Khan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NICTA and ANU, Australia",
"fullName": "Xuming He",
"givenName": null,
"surname": "Xuming He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of CSSE UWA, Australia",
"fullName": "M. Bannamoun",
"givenName": "M.",
"surname": "Bannamoun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of CSSE UWA, Australia",
"fullName": "F. Sohel",
"givenName": "F.",
"surname": "Sohel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of EECE UWA, Australia",
"fullName": "R. Togneri",
"givenName": "R.",
"surname": "Togneri",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4603-4611",
"year": "2015",
"issn": "1063-6919",
"isbn": "978-1-4673-6964-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07299090",
"articleId": "12OmNyGbIcx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07299092",
"articleId": "12OmNCeaPYA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2009/3992/0/05206537",
"title": "Recognizing indoor scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206537/12OmNAJm0ow",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2008/3341/0/3341a291",
"title": "Shadow Removal in Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2008/3341a291/12OmNAXPyoV",
"parentPublication": {
"id": "proceedings/avss/2008/3341/0",
"title": "2008 IEEE Fifth International Conference on Advanced Video and Signal Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2000/0662/1/06621804",
"title": "Detecting People in Cluttered Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2000/06621804/12OmNrAMF3z",
"parentPublication": {
"id": "proceedings/cvpr/2000/0662/1",
"title": "Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No.PR00662)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989a564",
"title": "Perceptual Organization and Recognition of Indoor Scenes from RGB-D Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989a564/12OmNs59JDU",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a001",
"title": "Matching Deformable Objects in Clutter",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a001/12OmNvC0sW5",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2009/3718/0/3718a158",
"title": "Multi-cue Based Visual Tracking in Clutter Scenes with Occlusions",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2009/3718a158/12OmNwdbV0L",
"parentPublication": {
"id": "proceedings/avss/2009/3718/0",
"title": "2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989c067",
"title": "Mesh Based Semantic Modelling for Indoor and Outdoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989c067/12OmNwekjC2",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840c144",
"title": "Support Surface Prediction in Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840c144/12OmNzRqdJl",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/04/08883084",
"title": "Active Arrangement of Small Objects in 3D Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2021/04/08883084/1epRSep15Wo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08737746",
"title": "Clouds of Oriented Gradients for 3D Detection of Objects, Surfaces, and Indoor Scene Layouts",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08737746/1mP22G2vmOA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1IFJPWahnva",
"title": "2022 2nd International Conference on Frontiers of Electronics, Information and Computation Technologies (ICFEICT)",
"acronym": "icfeict",
"groupId": "9951325",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IFJRNJi6NG",
"doi": "10.1109/ICFEICT57213.2022.00036",
"title": "Indoor Human Location Method for FMCW Radar Using Standard Deviation Weighting",
"normalizedTitle": "Indoor Human Location Method for FMCW Radar Using Standard Deviation Weighting",
"abstract": "For indoor detection radar, a challenging problem is to suppress stationary clutter in indoor environments. In this paper, we propose a simple method to improve the visibility of human in clutter. Considering the motion nature of the human body, the standard deviation of each range bin that represents the fluctuation level across the slow time window is exploited to weight the range-angle spectrum. Consequently, the human location is highlighted and the static clutter is suppressed. Experiments from the actual radar data demonstrates that even if multiple targets are in the same range bin the proposed method can still distinguish them and suppress the strong clutter interference at very low cost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For indoor detection radar, a challenging problem is to suppress stationary clutter in indoor environments. In this paper, we propose a simple method to improve the visibility of human in clutter. Considering the motion nature of the human body, the standard deviation of each range bin that represents the fluctuation level across the slow time window is exploited to weight the range-angle spectrum. Consequently, the human location is highlighted and the static clutter is suppressed. Experiments from the actual radar data demonstrates that even if multiple targets are in the same range bin the proposed method can still distinguish them and suppress the strong clutter interference at very low cost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For indoor detection radar, a challenging problem is to suppress stationary clutter in indoor environments. In this paper, we propose a simple method to improve the visibility of human in clutter. Considering the motion nature of the human body, the standard deviation of each range bin that represents the fluctuation level across the slow time window is exploited to weight the range-angle spectrum. Consequently, the human location is highlighted and the static clutter is suppressed. Experiments from the actual radar data demonstrates that even if multiple targets are in the same range bin the proposed method can still distinguish them and suppress the strong clutter interference at very low cost.",
"fno": "547600a159",
"keywords": [
"CW Radar",
"FM Radar",
"Radar Clutter",
"Radar Detection",
"Radar Tracking",
"Actual Radar Data",
"Fluctuation Level",
"FMCW Radar",
"Human Body",
"Indoor Detection Radar",
"Indoor Environments",
"Indoor Human Location Method",
"Motion Nature",
"Range Bin",
"Range Angle Spectrum",
"Slow Time Window",
"Standard Deviation Weighting",
"Static Clutter",
"Stationary Clutter",
"Strong Clutter Interference",
"Fluctuations",
"Costs",
"Radar Clutter",
"Radar Detection",
"Indoor Environment",
"Clutter",
"Standards",
"Indoor Human Location",
"FMCW Radar",
"Standard Deviation",
"Range Angle Spectrum"
],
"authors": [
{
"affiliation": "Foshan University,School of Mechatronic Engineering and Automation,Foshan,China",
"fullName": "Yihong Luo",
"givenName": "Yihong",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Guangdong University of Education,School of Computer Science,Guangzhou,China",
"fullName": "Xiaoxia Li",
"givenName": "Xiaoxia",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icfeict",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "159-163",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5476-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "547600a153",
"articleId": "1IFK3jgSJaM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "547600a164",
"articleId": "1IFK8t58lhu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2008/3435/1/3435a179",
"title": "A Model for the Ionospheric Clutter in HFSWR Radar",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2008/3435a179/12OmNBkxsox",
"parentPublication": {
"id": "proceedings/iciii/2008/3435/1",
"title": "2008 International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682304",
"title": "Design Procedures and Considerations of FOD Detection Millimeter-Wave FMCW Radar",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682304/12OmNs0C9AO",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pcspa/2010/4180/0/4180a703",
"title": "Analysis of Return Signal Mechanism in Ship-Board Radar",
"doi": null,
"abstractUrl": "/proceedings-article/pcspa/2010/4180a703/12OmNvjyxUG",
"parentPublication": {
"id": "proceedings/pcspa/2010/4180/0",
"title": "Pervasive Computing, Signal Porcessing and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/3/01326699",
"title": "Radar irregular sampling",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326699/12OmNvqmUGx",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/2/3962c370",
"title": "Ground Clutter Removing for Wind Profiler Radar Signal Using Adaptive Wavelet Threshold",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962c370/12OmNwDAC8t",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/2",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2012/1365/0/06274842",
"title": "Research of X-Band Radar Sea Clutter Image Simulation Method",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2012/06274842/12OmNwJybR7",
"parentPublication": {
"id": "proceedings/cso/2012/1365/0",
"title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2017/4662/0/08388313",
"title": "Radar environment characterization by signal processing techniques",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2017/08388313/12OmNzXnNqd",
"parentPublication": {
"id": "proceedings/isspit/2017/4662/0",
"title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000b436",
"title": "A Range Estimator of a Stationary Human among Stationary Clutter for Vital FMCW Radar",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000b436/1gjRCI2YuIw",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispds/2020/9668/0/966800a277",
"title": "Arc Array Radar IAA-STAP Algorithm Based on Sparse Constraint",
"doi": null,
"abstractUrl": "/proceedings-article/ispds/2020/966800a277/1oRiWWpOBtC",
"parentPublication": {
"id": "proceedings/ispds/2020/9668/0",
"title": "2020 International Conference on Information Science, Parallel and Distributed Systems (ISPDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2021/9101/0/09334023",
"title": "Vital information extraction using FMCW radar",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2021/09334023/1qTrNwOCyME",
"parentPublication": {
"id": "proceedings/icoin/2021/9101/0",
"title": "2021 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3np2MlqIo",
"doi": "10.1109/CVPR42600.2020.00097",
"title": "Geometric Structure Based and Regularized Depth Estimation From 360 Indoor Imagery",
"normalizedTitle": "Geometric Structure Based and Regularized Depth Estimation From 360 Indoor Imagery",
"abstract": "Motivated by the correlation between the depth and the geometric structure of a 360 indoor image, we propose a novel learning-based depth estimation framework that leverages the geometric structure of a scene to conduct depth estimation. Specifically, we represent the geometric structure of an indoor scene as a collection of corners, boundaries and planes. On the one hand, once a depth map is estimated, this geometric structure can be inferred from the estimated depth map; thus, the geometric structure functions as a regularizer for depth estimation. On the other hand, this estimation also benefits from the geometric structure of a scene estimated from an image where the structure functions as a prior. However, furniture in indoor scenes makes it challenging to infer geometric structure from depth or image data. An attention map is inferred to facilitate both depth estimation from features of the geometric structure and also geometric inferences from the estimated depth map. To validate the effectiveness of each component in our framework under controlled conditions, we render a synthetic dataset, Shanghaitech-Kujiale Indoor 360 dataset with 3550 360 indoor images. Extensive experiments on popular datasets validate the effectiveness of our solution. We also demonstrate that our method can also be applied to counterfactual depth.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motivated by the correlation between the depth and the geometric structure of a 360 indoor image, we propose a novel learning-based depth estimation framework that leverages the geometric structure of a scene to conduct depth estimation. Specifically, we represent the geometric structure of an indoor scene as a collection of corners, boundaries and planes. On the one hand, once a depth map is estimated, this geometric structure can be inferred from the estimated depth map; thus, the geometric structure functions as a regularizer for depth estimation. On the other hand, this estimation also benefits from the geometric structure of a scene estimated from an image where the structure functions as a prior. However, furniture in indoor scenes makes it challenging to infer geometric structure from depth or image data. An attention map is inferred to facilitate both depth estimation from features of the geometric structure and also geometric inferences from the estimated depth map. To validate the effectiveness of each component in our framework under controlled conditions, we render a synthetic dataset, Shanghaitech-Kujiale Indoor 360 dataset with 3550 360 indoor images. Extensive experiments on popular datasets validate the effectiveness of our solution. We also demonstrate that our method can also be applied to counterfactual depth.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motivated by the correlation between the depth and the geometric structure of a 360 indoor image, we propose a novel learning-based depth estimation framework that leverages the geometric structure of a scene to conduct depth estimation. Specifically, we represent the geometric structure of an indoor scene as a collection of corners, boundaries and planes. On the one hand, once a depth map is estimated, this geometric structure can be inferred from the estimated depth map; thus, the geometric structure functions as a regularizer for depth estimation. On the other hand, this estimation also benefits from the geometric structure of a scene estimated from an image where the structure functions as a prior. However, furniture in indoor scenes makes it challenging to infer geometric structure from depth or image data. An attention map is inferred to facilitate both depth estimation from features of the geometric structure and also geometric inferences from the estimated depth map. To validate the effectiveness of each component in our framework under controlled conditions, we render a synthetic dataset, Shanghaitech-Kujiale Indoor 360 dataset with 3550 360 indoor images. Extensive experiments on popular datasets validate the effectiveness of our solution. We also demonstrate that our method can also be applied to counterfactual depth.",
"fno": "716800a886",
"keywords": [
"Estimation Theory",
"Geometry",
"Image Resolution",
"Learning Artificial Intelligence",
"Geometric Structure",
"360 Indoor Image",
"Learning Based Depth Estimation Framework",
"Indoor Scene",
"Estimation",
"Convolution",
"Layout",
"Programmable Logic Arrays",
"Task Analysis",
"Three Dimensional Displays",
"Feeds"
],
"authors": [
{
"affiliation": "ShanghaiTech University",
"fullName": "Lei Jin",
"givenName": "Lei",
"surname": "Jin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Yanyu Xu",
"givenName": "Yanyu",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Jia Zheng",
"givenName": "Jia",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KooLab, Kujiale.com",
"fullName": "Junfei Zhang",
"givenName": "Junfei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KooLab, Kujiale.com",
"fullName": "Rui Tang",
"givenName": "Rui",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai University",
"fullName": "Shugong Xu",
"givenName": "Shugong",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Jingyi Yu",
"givenName": "Jingyi",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Shenghua Gao",
"givenName": "Shenghua",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "886-895",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800a876",
"articleId": "1m3nrFHOuCk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800a896",
"articleId": "1m3nmjQb31K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-graphics/2013/2576/0/06815041",
"title": "Indoor Structure Understanding from Single 360 Cylindrical Panoramic Image",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815041/12OmNwdtwl3",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900f169",
"title": "Rethinking Supervised Depth Estimation for 360° Panoramic Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900f169/1G57c9ZfWCI",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09878246",
"title": "Instant Automatic Emptying of Panoramic Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09878246/1GrP72KEfFS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c791",
"title": "OmniFusion: 360 Monocular Depth Estimation via Geometry-Aware Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c791/1H0NXIaExGM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d056",
"title": "360MVSNet: Deep Multi-view Stereo Network with 360° Images for Indoor Scene Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d056/1L8qkd9hTbi",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a076",
"title": "Pano Popups: Indoor 3D Reconstruction with a Plane-Aware Network",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a076/1ezRCDzaOxW",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093262",
"title": "360-Indoor: Towards Learning Real-World Objects in 360° Indoor Equirectangular Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093262/1jPbAWPyE8g",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900m2951",
"title": "LED<sup>2</sup>-Net: Monocular 360° Layout Estimation via Differentiable Depth Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900m2951/1yeHY8CW1Ne",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c573",
"title": "HoHoNet: 360 Indoor Holistic Understanding with Latent Horizontal Features",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c573/1yeIsDrhquc",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5348",
"title": "SSLayout360: Semi-Supervised Indoor Layout Estimation from 360° Panorama",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5348/1yeLn6wpowE",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxdm4IJ",
"title": "2015 IEEE/ACM 8th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE)",
"acronym": "chase",
"groupId": "1002764",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCbCrVw",
"doi": "10.1109/CHASE.2015.28",
"title": "Real-Time Monitoring of Neural State in Assessing and Improving Software Developers' Productivity",
"normalizedTitle": "Real-Time Monitoring of Neural State in Assessing and Improving Software Developers' Productivity",
"abstract": "Productivity has always been considered a crucial factor for the success of any business, and the same applies to software development. As a result of software development being almost entirely a cognitive task, problems in cognition highly correlate to problems in productivity. Being able to monitor the neural state of developers in real-time can aid in detecting and handling such cognitive problems before they occur and cause any damage. This also means aiding software developers in taking sufficient breaks, assigning tasks appropriate to their knowledge level, managing deadlines and stress, and so on. In this paper we propose Emendo - a conceptual system for continuous monitoring of developers' neural state using an off-the-shelf device. Furthermore, we provide a pilot study on the usability and feasibility of the proposed device for continuous monitoring. We also provide a short discussion of the ethical and acceptance issues of monitoring systems. Our goal is to introduce the possibility of real-time neural state monitoring and its potential benefits to the research community, hopefully attracting more researchers in this research field.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Productivity has always been considered a crucial factor for the success of any business, and the same applies to software development. As a result of software development being almost entirely a cognitive task, problems in cognition highly correlate to problems in productivity. Being able to monitor the neural state of developers in real-time can aid in detecting and handling such cognitive problems before they occur and cause any damage. This also means aiding software developers in taking sufficient breaks, assigning tasks appropriate to their knowledge level, managing deadlines and stress, and so on. In this paper we propose Emendo - a conceptual system for continuous monitoring of developers' neural state using an off-the-shelf device. Furthermore, we provide a pilot study on the usability and feasibility of the proposed device for continuous monitoring. We also provide a short discussion of the ethical and acceptance issues of monitoring systems. Our goal is to introduce the possibility of real-time neural state monitoring and its potential benefits to the research community, hopefully attracting more researchers in this research field.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Productivity has always been considered a crucial factor for the success of any business, and the same applies to software development. As a result of software development being almost entirely a cognitive task, problems in cognition highly correlate to problems in productivity. Being able to monitor the neural state of developers in real-time can aid in detecting and handling such cognitive problems before they occur and cause any damage. This also means aiding software developers in taking sufficient breaks, assigning tasks appropriate to their knowledge level, managing deadlines and stress, and so on. In this paper we propose Emendo - a conceptual system for continuous monitoring of developers' neural state using an off-the-shelf device. Furthermore, we provide a pilot study on the usability and feasibility of the proposed device for continuous monitoring. We also provide a short discussion of the ethical and acceptance issues of monitoring systems. Our goal is to introduce the possibility of real-time neural state monitoring and its potential benefits to the research community, hopefully attracting more researchers in this research field.",
"fno": "7031a093",
"keywords": [
"Electroencephalography",
"Monitoring",
"Sensors",
"Productivity",
"Real Time Systems",
"Usability",
"EEG",
"Neural Monitoring",
"Brain Computer Interface",
"Human Aspects In Software Engineering",
"Emotiv EPOC",
"Electroencephalography"
],
"authors": [
{
"affiliation": null,
"fullName": "Stevche Radevski",
"givenName": "Stevche",
"surname": "Radevski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hideaki Hata",
"givenName": "Hideaki",
"surname": "Hata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kenichi Matsumoto",
"givenName": "Kenichi",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "chase",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-05-01T00:00:00",
"pubType": "proceedings",
"pages": "93-96",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7031-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7031a089",
"articleId": "12OmNxWcHbY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7031a097",
"articleId": "12OmNCfSqRn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/esem/2017/4039/0/4039a105",
"title": "Characterizing Software Developers by Perceptions of Productivity",
"doi": null,
"abstractUrl": "/proceedings-article/esem/2017/4039a105/12OmNxcdG3z",
"parentPublication": {
"id": "proceedings/esem/2017/4039/0",
"title": "2017 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwsm-mensura/2014/4174/0/4174a173",
"title": "Productivity Monitoring Process Using FPA - Improving Your Development Process Using Productivity Indicators",
"doi": null,
"abstractUrl": "/proceedings-article/iwsm-mensura/2014/4174a173/12OmNzZWbNx",
"parentPublication": {
"id": "proceedings/iwsm-mensura/2014/4174/0",
"title": "2014 Joint Conference of the International Workshop on Software Measurement and the International Conference on Software Process and Product Measurement (IWSM-MENSURA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2018/5663/0/566301a480",
"title": "Fostering Software Developers' Productivity at Work Through Self-Monitoring and Goal-Setting",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2018/566301a480/13bd1tl2omA",
"parentPublication": {
"id": "proceedings/icse-companion/2018/5663/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2017/12/07829407",
"title": "The Work Life of Developers: Activities, Switches and Perceived Productivity",
"doi": null,
"abstractUrl": "/journal/ts/2017/12/07829407/13rRUEgarD9",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/1996/12/e0875",
"title": "Improving Speed and Productivity of Software Development: A Global Survey of Software Developers",
"doi": null,
"abstractUrl": "/journal/ts/1996/12/e0875/13rRUxjQyqN",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/03/08643844",
"title": "What Predicts Software Developers’ Productivity?",
"doi": null,
"abstractUrl": "/journal/ts/2021/03/08643844/17PYElnHbL7",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2022/9342/0/934200a026",
"title": "How Developers and Managers Define and Trade Productivity for Quality",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2022/934200a026/1Eo5LNoA2ac",
"parentPublication": {
"id": "proceedings/chase/2022/9342/0",
"title": "2022 IEEE/ACM 15th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/semotion/2019/2280/0/228000a013",
"title": "Towards Recognizing the Emotions of Developers Using Biometrics: The Design of a Field Study",
"doi": null,
"abstractUrl": "/proceedings-article/semotion/2019/228000a013/1d9UnrR5N2U",
"parentPublication": {
"id": "proceedings/semotion/2019/2280/0",
"title": "2019 IEEE/ACM 4th International Workshop on Emotion Awareness in Software Engineering (SEmotion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2022/09/09449979",
"title": "Emotions and Perceived Productivity of Software Developers at the Workplace",
"doi": null,
"abstractUrl": "/journal/ts/2022/09/09449979/1uiiTCkThG8",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a459",
"title": "Designing Augmented Reality Virtual Displays for Productivity Work",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a459/1yeQDpgn9ks",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzvQI1G",
"title": "Software Engineering Advances, International Conference on",
"acronym": "icsea",
"groupId": "1001267",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvUaNkh",
"doi": "10.1109/ICSEA.2010.37",
"title": "A Review of Productivity Factors and Strategies on Software Development",
"normalizedTitle": "A Review of Productivity Factors and Strategies on Software Development",
"abstract": "Since the late seventies, efforts to catalog factors that influences productivity, as well as actions to improve it, has been a huge concern for both academy and software development industry. Despite numerous studies, software organizations still do not know which the most significant factors are and what to do with it. Several studies present the factors in a very superficial way, some others address only the related factors or there are those that describe only a single factor. Actions to deal with the factors are spread and frequently were not mapped. Through a literature review, this paper presents a consolidated view of the main factors that have affected productivity over the years, and the strategies to deal with these factors nowadays. This research aims to support software development industry on the selection of their strategies to improve productivity by maximizing the positive factors and minimizing or avoiding the impact of the negative ones.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Since the late seventies, efforts to catalog factors that influences productivity, as well as actions to improve it, has been a huge concern for both academy and software development industry. Despite numerous studies, software organizations still do not know which the most significant factors are and what to do with it. Several studies present the factors in a very superficial way, some others address only the related factors or there are those that describe only a single factor. Actions to deal with the factors are spread and frequently were not mapped. Through a literature review, this paper presents a consolidated view of the main factors that have affected productivity over the years, and the strategies to deal with these factors nowadays. This research aims to support software development industry on the selection of their strategies to improve productivity by maximizing the positive factors and minimizing or avoiding the impact of the negative ones.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Since the late seventies, efforts to catalog factors that influences productivity, as well as actions to improve it, has been a huge concern for both academy and software development industry. Despite numerous studies, software organizations still do not know which the most significant factors are and what to do with it. Several studies present the factors in a very superficial way, some others address only the related factors or there are those that describe only a single factor. Actions to deal with the factors are spread and frequently were not mapped. Through a literature review, this paper presents a consolidated view of the main factors that have affected productivity over the years, and the strategies to deal with these factors nowadays. This research aims to support software development industry on the selection of their strategies to improve productivity by maximizing the positive factors and minimizing or avoiding the impact of the negative ones.",
"fno": "4144a196",
"keywords": [
"Productivity",
"Software Development Management",
"Productivity Factors",
"Catalog Factors",
"Software Development Industry",
"Software Organizations",
"Productivity",
"Software",
"Organizations",
"Complexity Theory",
"Tools",
"Computer Languages",
"Software Engineering",
"Productivity Factors",
"Strategies",
"Software Engineering",
"Productivity Improvement"
],
"authors": [
{
"affiliation": "CIN - Inf. Center, Fed. Univ. of Pernambuco - UFPE, Recife, Brazil",
"fullName": "Suzana Cândido de Barros Sampaio",
"givenName": "Suzana Cândido",
"surname": "de Barros Sampaio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CIN - Inf. Center, Fed. Univ. of Pernambuco - UFPE, Recife, Brazil",
"fullName": "Emanuella Aleixo Barros",
"givenName": "Emanuella Aleixo",
"surname": "Barros",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CESAR - Recife Center of Advanced Studies & Systems, Recife-PE, Brazil",
"fullName": "Gibeon Soares de Aquino Jr.",
"givenName": "Gibeon Soares",
"surname": "de Aquino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CESAR - Recife Center of Adv. Studies & Syst., Recife, Brazil",
"fullName": "Mauro José Carlos e Silva",
"givenName": "Mauro José Carlos",
"surname": "e Silva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CESAR - Recife Center of Advanced Studies & Systems, Recife, Brazil",
"fullName": "Silvio Romero de Lemos Meira",
"givenName": "Silvio Romero",
"surname": "de Lemos Meira",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icsea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "196-204",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-7788-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4144a190",
"articleId": "12OmNvlg8iR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4144a205",
"articleId": "12OmNAlvI2X",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/agile/2011/426/0/06005486",
"title": "Agile Team Perceptions of Productivity Factors",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2011/06005486/12OmNAlvHyZ",
"parentPublication": {
"id": "proceedings/agile/2011/426/0",
"title": "2011 Agile Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2017/3681/0/3681a526",
"title": "An Empirical Study to Revisit Productivity across Different Programming Languages",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2017/3681a526/12OmNBNM8OQ",
"parentPublication": {
"id": "proceedings/apsec/2017/3681/0",
"title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2017/3681/0/3681a737",
"title": "Factors Influencing Productivity of Agile Software Development Teamwork: A Qualitative System Dynamics Approach",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2017/3681a737/12OmNvzJGbB",
"parentPublication": {
"id": "proceedings/apsec/2017/3681/0",
"title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892e606",
"title": "Why SPI Initiative Failed: Contextual Factors and Changing Software Development Environment",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892e606/12OmNwKoZca",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esem/2017/4039/0/4039a436",
"title": "On Software Productivity Analysis with Propensity Score Matching",
"doi": null,
"abstractUrl": "/proceedings-article/esem/2017/4039a436/12OmNwnH4Mf",
"parentPublication": {
"id": "proceedings/esem/2017/4039/0",
"title": "2017 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/conisoft/2017/3956/0/395601a044",
"title": "Productivity in Agile Software Development: A Systematic Mapping Study",
"doi": null,
"abstractUrl": "/proceedings-article/conisoft/2017/395601a044/12OmNzV70o8",
"parentPublication": {
"id": "proceedings/conisoft/2017/3956/0",
"title": "2017 5th International Conference in Software Engineering Research and Innovation (CONISOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwsm-mensura/2014/4174/0/4174a173",
"title": "Productivity Monitoring Process Using FPA - Improving Your Development Process Using Productivity Indicators",
"doi": null,
"abstractUrl": "/proceedings-article/iwsm-mensura/2014/4174a173/12OmNzZWbNx",
"parentPublication": {
"id": "proceedings/iwsm-mensura/2014/4174/0",
"title": "2014 Joint Conference of the International Workshop on Software Measurement and the International Conference on Software Process and Product Measurement (IWSM-MENSURA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2013/03/tts2013030343",
"title": "Coordination Breakdowns and Their Impact on Development Productivity and Software Failures",
"doi": null,
"abstractUrl": "/journal/ts/2013/03/tts2013030343/13rRUIM2VDf",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsme/2018/7870/0/787000a732",
"title": "Team Maturity in Agile Software Development: The Impact on Productivity",
"doi": null,
"abstractUrl": "/proceedings-article/icsme/2018/787000a732/17D45Wuc33H",
"parentPublication": {
"id": "proceedings/icsme/2018/7870/0",
"title": "2018 IEEE International Conference on Software Maintenance and Evolution (ICSME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/04/08658138",
"title": "The Effect of Work Environments on Productivity and Satisfaction of Software Engineers",
"doi": null,
"abstractUrl": "/journal/ts/2021/04/08658138/187ZCMEkrQs",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzFMFp9",
"title": "2011 30th International Conference of the Chilean Computer Science Society",
"acronym": "sccc",
"groupId": "1001489",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNywxlOt",
"doi": "10.1109/SCCC.2011.11",
"title": "A Comparative Analysis of the Agile and Traditional Software Development Processes Productivity",
"normalizedTitle": "A Comparative Analysis of the Agile and Traditional Software Development Processes Productivity",
"abstract": "Software development processes are essential for an organization to obtain the required levels of productivity and quality. The productivity analysis of agile and traditional development processes is an open and few explored research area, which has attracted the interest of industrial and academic fellows in order to take advantage of the strengths of both approaches. This research aims to investigate good options for agile and traditional integration by defining a hybrid process that takes advantage of both approaches. An empirical study aiming to evaluate the productivity impact of the proposed hybrid process was carried out in a Brazilian CMMI v.1.1 Maturity Level 2 medium-sized company. Five groups of similar projects were compared with respect to productivity, some of which were developed using the new hybrid process and others were developed using the older RUP-based process. Quantitative results have shown that four out of five project groups showed significant productivity increase in Scrum-RUP projects. The study shows that it is possible to integrate agile practices in the software development process without losing the rigor needed in the desired sub processes and still get real development productivity gain.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Software development processes are essential for an organization to obtain the required levels of productivity and quality. The productivity analysis of agile and traditional development processes is an open and few explored research area, which has attracted the interest of industrial and academic fellows in order to take advantage of the strengths of both approaches. This research aims to investigate good options for agile and traditional integration by defining a hybrid process that takes advantage of both approaches. An empirical study aiming to evaluate the productivity impact of the proposed hybrid process was carried out in a Brazilian CMMI v.1.1 Maturity Level 2 medium-sized company. Five groups of similar projects were compared with respect to productivity, some of which were developed using the new hybrid process and others were developed using the older RUP-based process. Quantitative results have shown that four out of five project groups showed significant productivity increase in Scrum-RUP projects. The study shows that it is possible to integrate agile practices in the software development process without losing the rigor needed in the desired sub processes and still get real development productivity gain.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Software development processes are essential for an organization to obtain the required levels of productivity and quality. The productivity analysis of agile and traditional development processes is an open and few explored research area, which has attracted the interest of industrial and academic fellows in order to take advantage of the strengths of both approaches. This research aims to investigate good options for agile and traditional integration by defining a hybrid process that takes advantage of both approaches. An empirical study aiming to evaluate the productivity impact of the proposed hybrid process was carried out in a Brazilian CMMI v.1.1 Maturity Level 2 medium-sized company. Five groups of similar projects were compared with respect to productivity, some of which were developed using the new hybrid process and others were developed using the older RUP-based process. Quantitative results have shown that four out of five project groups showed significant productivity increase in Scrum-RUP projects. The study shows that it is possible to integrate agile practices in the software development process without losing the rigor needed in the desired sub processes and still get real development productivity gain.",
"fno": "06363385",
"keywords": [
"Capability Maturity Model",
"Software Prototyping",
"Agile Software Development Processes Productivity",
"Traditional Software Development Processes Productivity",
"Productivity Analysis",
"Brazilian CMMI V 1 1 Maturity Level 2 Medium Sized Company",
"Hybrid Process",
"Scrum RUP Projects",
"Development Productivity Gain",
"Software",
"Productivity",
"Companies",
"Process Control",
"Computer Architecture",
"Measurement",
"Planning"
],
"authors": [
{
"affiliation": null,
"fullName": "William Chaves de Souza Carvalho",
"givenName": "William Chaves de Souza",
"surname": "Carvalho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pedro Frosi Rosa",
"givenName": "Pedro Frosi",
"surname": "Rosa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michel dos Santos Soares",
"givenName": "Michel dos Santos",
"surname": "Soares",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marco Antonio Teixeira da Cunha Jr.",
"givenName": "Marco Antonio Teixeira da",
"surname": "Cunha Jr.",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Luiz Carlos Buiatte",
"givenName": "Luiz Carlos",
"surname": "Buiatte",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sccc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "74-82",
"year": "2011",
"issn": "1522-4902",
"isbn": "978-0-7695-4689-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06363384",
"articleId": "12OmNrYCXPk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06363386",
"articleId": "12OmNy50gja",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/artcom/2009/3845/0/3845a001",
"title": "Agile EDI Framework for B2B Applications",
"doi": null,
"abstractUrl": "/proceedings-article/artcom/2009/3845a001/12OmNAkWvqw",
"parentPublication": {
"id": "proceedings/artcom/2009/3845/0",
"title": "Advances in Recent Technologies in Communication and Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/agile/2011/426/0/06005486",
"title": "Agile Team Perceptions of Productivity Factors",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2011/06005486/12OmNAlvHyZ",
"parentPublication": {
"id": "proceedings/agile/2011/426/0",
"title": "2011 Agile Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2017/3681/0/3681a737",
"title": "Factors Influencing Productivity of Agile Software Development Teamwork: A Qualitative System Dynamics Approach",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2017/3681a737/12OmNvzJGbB",
"parentPublication": {
"id": "proceedings/apsec/2017/3681/0",
"title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cee-secr/2010/0605/0/05783174",
"title": "Processes and people",
"doi": null,
"abstractUrl": "/proceedings-article/cee-secr/2010/05783174/12OmNzUxO7D",
"parentPublication": {
"id": "proceedings/cee-secr/2010/0605/0",
"title": "Software Engineering Conference in Russia, Central and Eastern European",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/conisoft/2017/3956/0/395601a044",
"title": "Productivity in Agile Software Development: A Systematic Mapping Study",
"doi": null,
"abstractUrl": "/proceedings-article/conisoft/2017/395601a044/12OmNzV70o8",
"parentPublication": {
"id": "proceedings/conisoft/2017/3956/0",
"title": "2017 5th International Conference in Software Engineering Research and Innovation (CONISOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2010/03/05232801",
"title": "Using the Agile Unified Process in Banking",
"doi": null,
"abstractUrl": "/magazine/so/2010/03/05232801/13rRUxDItfv",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsme/2018/7870/0/787000a732",
"title": "Team Maturity in Agile Software Development: The Impact on Productivity",
"doi": null,
"abstractUrl": "/proceedings-article/icsme/2018/787000a732/17D45Wuc33H",
"parentPublication": {
"id": "proceedings/icsme/2018/7870/0",
"title": "2018 IEEE International Conference on Software Maintenance and Evolution (ICSME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/04/08664196",
"title": "Studying Task Processes for Improving Programmer Productivity",
"doi": null,
"abstractUrl": "/journal/ts/2021/04/08664196/1mq8mkdh0fm",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seaa/2020/9532/0/09226278",
"title": "Productivity, Turnover, and Team Stability of Agile Teams in Open-Source Software Projects",
"doi": null,
"abstractUrl": "/proceedings-article/seaa/2020/09226278/1nYsRCjImm4",
"parentPublication": {
"id": "proceedings/seaa/2020/9532/0",
"title": "2020 46th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2020/7303/0/730300a460",
"title": "Productivity Evaluation Indicators Based on LEAN and their Application to Compare Agile and Waterfall Projects",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2020/730300a460/1nkDe3ujN1m",
"parentPublication": {
"id": "proceedings/compsac/2020/7303/0",
"title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnWZ1klXdS",
"doi": "10.1109/VRW52623.2021.00132",
"title": "Gender Differences of Cognitive Loads in Augmented Reality-based Warehouse",
"normalizedTitle": "Gender Differences of Cognitive Loads in Augmented Reality-based Warehouse",
"abstract": "The rapid emergence of augmented reality (AR) has brought considerable advantages to warehouse workers. However, due to inherent biological and cognitive differences, the male and female workers perceive cognitive loads differently. Understanding the differences is essential to improve the workers' productivity and well-being. Therefore, we developed the AR headset that helped participants facilitate parcel scanning and evaluated the gender differences in the context of long-lasting repetitive parcel scanning. The results show that the female workers had significantly lower operational efficiency, higher visual attention, and higher memory loads than the male, but they quickly gained advantages in these aspects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The rapid emergence of augmented reality (AR) has brought considerable advantages to warehouse workers. However, due to inherent biological and cognitive differences, the male and female workers perceive cognitive loads differently. Understanding the differences is essential to improve the workers' productivity and well-being. Therefore, we developed the AR headset that helped participants facilitate parcel scanning and evaluated the gender differences in the context of long-lasting repetitive parcel scanning. The results show that the female workers had significantly lower operational efficiency, higher visual attention, and higher memory loads than the male, but they quickly gained advantages in these aspects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The rapid emergence of augmented reality (AR) has brought considerable advantages to warehouse workers. However, due to inherent biological and cognitive differences, the male and female workers perceive cognitive loads differently. Understanding the differences is essential to improve the workers' productivity and well-being. Therefore, we developed the AR headset that helped participants facilitate parcel scanning and evaluated the gender differences in the context of long-lasting repetitive parcel scanning. The results show that the female workers had significantly lower operational efficiency, higher visual attention, and higher memory loads than the male, but they quickly gained advantages in these aspects.",
"fno": "405700a500",
"keywords": [
"Augmented Reality",
"Cognition",
"Gaze Tracking",
"Gender Issues",
"Neurophysiology",
"Personnel",
"Production Engineering Computing",
"Warehousing",
"Male Workers",
"Female Workers",
"Cognitive Loads",
"Gender Differences",
"Long Lasting Repetitive Parcel Scanning",
"Higher Memory Loads",
"Warehouse Workers",
"Cognitive Differences",
"Augmented Reality Based Warehouse",
"Biological Differences",
"AR Headset",
"Headphones",
"Productivity",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Memory Management",
"User Interfaces",
"Gender Difference",
"Cognitive Loads",
"Augmented Reality",
"Contemporary Warehouse"
],
"authors": [
{
"affiliation": "Zhejiang University,College of computer science and technology,Hangzhou,China P.R,310027",
"fullName": "Zihan Yan",
"givenName": "Zihan",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,College of computer science and technology,Hangzhou,China P.R,310027",
"fullName": "Yifei Shan",
"givenName": "Yifei",
"surname": "Shan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,College of computer science and technology,Hangzhou,China P.R,310027",
"fullName": "Yiyang Li",
"givenName": "Yiyang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,College of computer science and technology,Hangzhou,China P.R,310027",
"fullName": "Kailin Yin",
"givenName": "Kailin",
"surname": "Yin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,College of computer science and technology,Hangzhou,China P.R,310027",
"fullName": "Xiangdong Li",
"givenName": "Xiangdong",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "500-501",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a498",
"articleId": "1tnWFlvbESk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a502",
"articleId": "1tnXkpvZfqg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccit/2009/3896/0/3896a571",
"title": "Family-Friendly Policies and Work-Family Balance: The Gender Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/iccit/2009/3896a571/12OmNAlvHBy",
"parentPublication": {
"id": "proceedings/iccit/2009/3896/0",
"title": "Convergence Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2010/4215/0/4215a398",
"title": "Effects of Smiling and Gender on Trust Toward a Recommendation Agent",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2010/4215a398/12OmNBJw9Rs",
"parentPublication": {
"id": "proceedings/cw/2010/4215/0",
"title": "2010 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2004/8552/0/01408748",
"title": "Gender trends in engineering retention",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2004/01408748/12OmNCd2roX",
"parentPublication": {
"id": "proceedings/fie/2004/8552/0",
"title": "34th Annual Frontiers in Education, 2004. FIE 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2015/6593/0/6593a447",
"title": "Gender Difference in the Use of Hospitalization Services in Rural China - Evidence from Sichuan Province",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2015/6593a447/12OmNwvDQwn",
"parentPublication": {
"id": "proceedings/dcabes/2015/6593/0",
"title": "2015 14th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670d858",
"title": "Gender Differences in Online Dating: What Do We Know So Far? A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670d858/12OmNxwWoRp",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892e817",
"title": "Evaluating Gender Significance within a Pair Programming Context",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892e817/12OmNyp9MlS",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2015/7367/0/7367b869",
"title": "Does Gender Make a Difference? Undergraduate Students' Use of Smart CVs for Career Planning",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2015/7367b869/12OmNywfKy2",
"parentPublication": {
"id": "proceedings/hicss/2015/7367/0",
"title": "2015 48th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a672",
"title": "Proximity in VR: The Importance of Character Attractiveness and Participant Gender",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a672/1CJdlUeTTlC",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2022/6244/0/09962682",
"title": "Examining Student Responses and Gender Differences to a First-Year Sociotechnical Engineering Course",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2022/09962682/1IHohJ4oqis",
"parentPublication": {
"id": "proceedings/fie/2022/6244/0",
"title": "2022 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a687",
"title": "Who Are Virtual Reality Headset Owners? A Survey and Comparison of Headset Owners and Non-Owners",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a687/1tuB6Ibu8j6",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnX9xsCTVC",
"doi": "10.1109/VRW52623.2021.00263",
"title": "Demonstrating the Use of Rapid Touch Interaction in Virtual Reality for Prolonged Interaction in Productivity Scenarios",
"normalizedTitle": "Demonstrating the Use of Rapid Touch Interaction in Virtual Reality for Prolonged Interaction in Productivity Scenarios",
"abstract": "Current camera-based VR headsets support free-hand mid-air interaction or physical hand-held controllers for input, which can lead to fatigue during use, as users lack support for their arms and hands between interactions. In our demonstration, we showcase a novel approach to bring quick touch interaction to Virtual Reality, illustrating the beneficial use of rapid tapping, typing, and surface gestures for ongoing interaction in Virtual Reality, particularly in the context of content creation and productivity scenarios. The productivity scenarios that become possible using our approach are therefore reminiscent of apps that exist on today's phones and tablets. To reliably make touch interaction work in VR, we use a wrist-worn prototype to complement the optical hand tracking from VR headsets with inertial sensing to detect touch events on surfaces. Our prototype band TapID integrates a pair of inertial sensors in a flexible strap, from whose signals TapID reliably detects surface touch events and identifies the finger used for touch. This event detection is then fused with the optically tracked hand poses to trigger input in VR. Our demonstration comprises a series of VR applications, including UI control in word processors, web browsers, and document editors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Current camera-based VR headsets support free-hand mid-air interaction or physical hand-held controllers for input, which can lead to fatigue during use, as users lack support for their arms and hands between interactions. In our demonstration, we showcase a novel approach to bring quick touch interaction to Virtual Reality, illustrating the beneficial use of rapid tapping, typing, and surface gestures for ongoing interaction in Virtual Reality, particularly in the context of content creation and productivity scenarios. The productivity scenarios that become possible using our approach are therefore reminiscent of apps that exist on today's phones and tablets. To reliably make touch interaction work in VR, we use a wrist-worn prototype to complement the optical hand tracking from VR headsets with inertial sensing to detect touch events on surfaces. Our prototype band TapID integrates a pair of inertial sensors in a flexible strap, from whose signals TapID reliably detects surface touch events and identifies the finger used for touch. This event detection is then fused with the optically tracked hand poses to trigger input in VR. Our demonstration comprises a series of VR applications, including UI control in word processors, web browsers, and document editors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Current camera-based VR headsets support free-hand mid-air interaction or physical hand-held controllers for input, which can lead to fatigue during use, as users lack support for their arms and hands between interactions. In our demonstration, we showcase a novel approach to bring quick touch interaction to Virtual Reality, illustrating the beneficial use of rapid tapping, typing, and surface gestures for ongoing interaction in Virtual Reality, particularly in the context of content creation and productivity scenarios. The productivity scenarios that become possible using our approach are therefore reminiscent of apps that exist on today's phones and tablets. To reliably make touch interaction work in VR, we use a wrist-worn prototype to complement the optical hand tracking from VR headsets with inertial sensing to detect touch events on surfaces. Our prototype band TapID integrates a pair of inertial sensors in a flexible strap, from whose signals TapID reliably detects surface touch events and identifies the finger used for touch. This event detection is then fused with the optically tracked hand poses to trigger input in VR. Our demonstration comprises a series of VR applications, including UI control in word processors, web browsers, and document editors.",
"fno": "405700a761",
"keywords": [
"Cameras",
"Gesture Recognition",
"Human Computer Interaction",
"Optical Tracking",
"Touch Sensitive Screens",
"User Interfaces",
"Virtual Reality",
"Wearable Computers",
"Quick Touch Interaction",
"Virtual Reality",
"Rapid Tapping",
"Surface Gestures",
"Ongoing Interaction",
"Productivity Scenarios",
"Wrist Worn Prototype",
"Optical Hand Tracking",
"Touch Events",
"Prototype Band Tap ID",
"Event Detection",
"Hand Poses",
"VR Applications",
"Rapid Touch Interaction",
"Mid Air Inter Action",
"Physical Hand Held Controllers",
"Camera Based VR Headset Support",
"Productivity",
"Headphones",
"Three Dimensional Displays",
"Conferences",
"Prototypes",
"Virtual Reality",
"User Interfaces",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Interaction Techniques",
"Gestural Input"
],
"authors": [
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Manuel Meier",
"givenName": "Manuel",
"surname": "Meier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Paul Streli",
"givenName": "Paul",
"surname": "Streli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Andreas Fender",
"givenName": "Andreas",
"surname": "Fender",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Christian Holz",
"givenName": "Christian",
"surname": "Holz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "761-762",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a759",
"articleId": "1tnXiK8j7fq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a763",
"articleId": "1tnXmgy4vDi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tq/2019/03/08276563",
"title": "GaitLock: Protect Virtual and Augmented Reality Headsets Using Gait",
"doi": null,
"abstractUrl": "/journal/tq/2019/03/08276563/13rRUwI5TSF",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699248",
"title": "DualGaze: Addressing the Midas Touch Problem in Gaze Mediated VR Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699248/19F1R5RaLFS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a023",
"title": "View-Adaptive Asymmetric Image Detail Enhancement for 360-degree Stereoscopic VR Content",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a023/1CJcBKE82SA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a727",
"title": "Phantom Touch phenomenon as a manifestation of the Visual-Auditory-Tactile Synaesthesia and its impact on the users in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a727/1J7WmsXKzxS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a905",
"title": "Haptics in VR Using Origami-Augmented Drones",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a905/1J7WrPcWIVO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a807",
"title": "Touching The Droid: Understanding and Improving Touch Precision With Mobile Devices in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a807/1JrR8xUGjpm",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797993",
"title": "VirtualTablet: Extending Movable Surfaces with Touch Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797993/1cJ1hgQ4Li8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a361",
"title": "Pen-based Interaction with Spreadsheets in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a361/1pysxojAVAk",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a528",
"title": "VXSlate: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a528/1tnXg447e7e",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a519",
"title": "TapID: Rapid Touch Interaction in Virtual Reality using Wearable Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a519/1tuBtNYt0LC",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAI6Ij8is",
"doi": "10.1109/VR50410.2021.00103",
"title": "Do we still need physical monitors? An evaluation of the usability of AR virtual monitors for productivity work",
"normalizedTitle": "Do we still need physical monitors? An evaluation of the usability of AR virtual monitors for productivity work",
"abstract": "Physical monitors require space, lack flexibility, and can become expensive and less portable in large setups. Virtual monitors, on the other hand, can minimize those problems, but may be subject to technological limitations such as lower resolution and field of view. We investigate the impacts of using virtual monitors displayed on a current state-of-the-art augmented reality headset for conducting productivity work. We conducted a user study that compared physical monitors, virtual monitors, and a hybrid combination of both in terms of performance, accuracy, comfort, focus, preference, and confidence. Results show that virtual monitors are a feasible approach for performing serious productivity work, albeit currently constrained by technical limitations that lead to inferior usability and performance compared to physical monitors. We also discovered that, with current technology, the hybrid condition was a better tradeoff between the familiarity and trustworthiness of physical monitors and the extra space provided by virtual monitors. We conclude by expressing the opportunity for designing strategies for mixing virtual and physical monitors into novel hybrid interfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Physical monitors require space, lack flexibility, and can become expensive and less portable in large setups. Virtual monitors, on the other hand, can minimize those problems, but may be subject to technological limitations such as lower resolution and field of view. We investigate the impacts of using virtual monitors displayed on a current state-of-the-art augmented reality headset for conducting productivity work. We conducted a user study that compared physical monitors, virtual monitors, and a hybrid combination of both in terms of performance, accuracy, comfort, focus, preference, and confidence. Results show that virtual monitors are a feasible approach for performing serious productivity work, albeit currently constrained by technical limitations that lead to inferior usability and performance compared to physical monitors. We also discovered that, with current technology, the hybrid condition was a better tradeoff between the familiarity and trustworthiness of physical monitors and the extra space provided by virtual monitors. We conclude by expressing the opportunity for designing strategies for mixing virtual and physical monitors into novel hybrid interfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Physical monitors require space, lack flexibility, and can become expensive and less portable in large setups. Virtual monitors, on the other hand, can minimize those problems, but may be subject to technological limitations such as lower resolution and field of view. We investigate the impacts of using virtual monitors displayed on a current state-of-the-art augmented reality headset for conducting productivity work. We conducted a user study that compared physical monitors, virtual monitors, and a hybrid combination of both in terms of performance, accuracy, comfort, focus, preference, and confidence. Results show that virtual monitors are a feasible approach for performing serious productivity work, albeit currently constrained by technical limitations that lead to inferior usability and performance compared to physical monitors. We also discovered that, with current technology, the hybrid condition was a better tradeoff between the familiarity and trustworthiness of physical monitors and the extra space provided by virtual monitors. We conclude by expressing the opportunity for designing strategies for mixing virtual and physical monitors into novel hybrid interfaces.",
"fno": "255600a759",
"keywords": [
"Augmented Reality",
"AR Virtual Monitors",
"Productivity Work",
"Usability Evaluation",
"Technological Limitations",
"Augmented Reality Headset",
"Physical Monitor Trustworthiness",
"Productivity",
"Headphones",
"Three Dimensional Displays",
"User Interfaces",
"Usability",
"Monitoring",
"Augmented Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Empirical Studies In Interaction Design"
],
"authors": [
{
"affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech,Blacksburg,VA,USA",
"fullName": "Leonardo Pavanatto",
"givenName": "Leonardo",
"surname": "Pavanatto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sanghani Center for AI and Analytics,Department of Computer Science Virginia Tech,Blacksburg,VA,USA",
"fullName": "Chris North",
"givenName": "Chris",
"surname": "North",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech,Blacksburg,VA,USA",
"fullName": "Doug A. Bowman",
"givenName": "Doug A.",
"surname": "Bowman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Redmond,WA,USA",
"fullName": "Carmen Badea",
"givenName": "Carmen",
"surname": "Badea",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Redmond,WA,USA",
"fullName": "Richard Stoakley",
"givenName": "Richard",
"surname": "Stoakley",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "759-767",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600a749",
"articleId": "1tuAgrCerNC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a768",
"articleId": "1tuAQLvc5WM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836488",
"title": "A Transitional AR Furniture Arrangement System with Automatic View Recommendation",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836488/12OmNBVrji6",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2011/4420/0/4420b009",
"title": "Mirror Worlds: Experimenting with Heterogeneous AR",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2011/4420b009/12OmNzFv4kl",
"parentPublication": {
"id": "proceedings/isuvr/2011/4420/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260971",
"title": "Exercise Intensity-Driven Level Design",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260971/13rRUwcAqqn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699229",
"title": "The Effect of AR Based Emotional Interaction Among Personified Physical Objects in Manual Operation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699229/19F1LS1YWuA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873972",
"title": "From Shielding to Avoidance: Passenger Augmented Reality and the Layout of Virtual Displays for Productivity in Shared Transit",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873972/1GjwQcFdVyU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2023/02/10043615",
"title": "Developer Productivity for Humans, Part 2: Hybrid Productivity",
"doi": null,
"abstractUrl": "/magazine/so/2023/02/10043615/1KJseB7yrdK",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a076",
"title": "MiXR: A Hybrid AR Sheet Music Interface for Live Performance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a076/1pBMiNAfDpK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a761",
"title": "Demonstrating the Use of Rapid Touch Interaction in Virtual Reality for Prolonged Interaction in Productivity Scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a761/1tnX9xsCTVC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a265",
"title": "The Passenger Experience of Mixed Reality Virtual Display Layouts in Airplane Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a265/1yeCTWHYvxS",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a459",
"title": "Designing Augmented Reality Virtual Displays for Productivity Work",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a459/1yeQDpgn9ks",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeCTWHYvxS",
"doi": "10.1109/ISMAR52148.2021.00042",
"title": "The Passenger Experience of Mixed Reality Virtual Display Layouts in Airplane Environments",
"normalizedTitle": "The Passenger Experience of Mixed Reality Virtual Display Layouts in Airplane Environments",
"abstract": "Augmented / Mixed Reality headsets will in-time see adoption and use in a variety of mobility and transit contexts, allowing users to view and interact with virtual content and displays for productivity and entertainment. However, little is known regarding how multi-display virtual workspaces should be presented in a transit context, nor to what extent the unique affordances of transit environments (e.g. the social presence of others) might influence passenger perception of virtual display layouts. Using a simulated VR passenger airplane environment, we evaluated three different AR-driven virtual display configurations (Horizontal, Vertical, and Focus main display with smaller secondary windows) at two different depths, exploring their usability, user preferences, and the underlying factors that influenced those preferences. We found that the perception of invading other’s personal space significantly influenced preferred layouts in transit contexts. Based on our findings, we reflect on the unique challenges posed by passenger contexts, provide recommendations regarding virtual display layout in the confined airplane environment, and expand on the significant benefits that AR offers over physical displays in said environments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented / Mixed Reality headsets will in-time see adoption and use in a variety of mobility and transit contexts, allowing users to view and interact with virtual content and displays for productivity and entertainment. However, little is known regarding how multi-display virtual workspaces should be presented in a transit context, nor to what extent the unique affordances of transit environments (e.g. the social presence of others) might influence passenger perception of virtual display layouts. Using a simulated VR passenger airplane environment, we evaluated three different AR-driven virtual display configurations (Horizontal, Vertical, and Focus main display with smaller secondary windows) at two different depths, exploring their usability, user preferences, and the underlying factors that influenced those preferences. We found that the perception of invading other’s personal space significantly influenced preferred layouts in transit contexts. Based on our findings, we reflect on the unique challenges posed by passenger contexts, provide recommendations regarding virtual display layout in the confined airplane environment, and expand on the significant benefits that AR offers over physical displays in said environments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented / Mixed Reality headsets will in-time see adoption and use in a variety of mobility and transit contexts, allowing users to view and interact with virtual content and displays for productivity and entertainment. However, little is known regarding how multi-display virtual workspaces should be presented in a transit context, nor to what extent the unique affordances of transit environments (e.g. the social presence of others) might influence passenger perception of virtual display layouts. Using a simulated VR passenger airplane environment, we evaluated three different AR-driven virtual display configurations (Horizontal, Vertical, and Focus main display with smaller secondary windows) at two different depths, exploring their usability, user preferences, and the underlying factors that influenced those preferences. We found that the perception of invading other’s personal space significantly influenced preferred layouts in transit contexts. Based on our findings, we reflect on the unique challenges posed by passenger contexts, provide recommendations regarding virtual display layout in the confined airplane environment, and expand on the significant benefits that AR offers over physical displays in said environments.",
"fno": "015800a265",
"keywords": [
"Aerospace Computing",
"Augmented Reality",
"Human Computer Interaction",
"Virtual Reality",
"Airplane Environments",
"Multidisplay Virtual Workspaces",
"Transit Environments",
"Passenger Perception",
"Virtual Display Layout",
"Simulated VR Passenger Airplane Environment",
"User Preferences",
"Passenger Contexts",
"Confined Airplane Environment",
"Passenger Experience",
"AR Driven Virtual Display Configurations",
"Mixed Reality Virtual Display Layouts",
"Augmented Reality",
"Productivity",
"Headphones",
"Airplanes",
"Layout",
"Entertainment Industry",
"Mixed Reality",
"Virtual Environments",
"Mixed Reality",
"Virtual Reality",
"Augmented Reality",
"Multi Display Layouts",
"Virtual Workspace",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms"
],
"authors": [
{
"affiliation": "University of Glasgow,Glasgow Interactive Systems (GIST), School of Computing Science",
"fullName": "Alexander Ng",
"givenName": "Alexander",
"surname": "Ng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Glasgow,Glasgow Interactive Systems (GIST), School of Computing Science",
"fullName": "Daniel Medeiros",
"givenName": "Daniel",
"surname": "Medeiros",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Glasgow,Glasgow Interactive Systems (GIST), School of Computing Science",
"fullName": "Mark McGill",
"givenName": "Mark",
"surname": "McGill",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Glasgow,Glasgow Interactive Systems (GIST), School of Computing Science",
"fullName": "Julie Williamson",
"givenName": "Julie",
"surname": "Williamson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Glasgow,Glasgow Interactive Systems (GIST), School of Computing Science",
"fullName": "Stephen Brewster",
"givenName": "Stephen",
"surname": "Brewster",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "265-274",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeCTwZi2Nq",
"name": "pismar202101580-09583840s1-mm_015800a265.zip",
"size": "46.7 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202101580-09583840s1-mm_015800a265.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "015800a256",
"articleId": "1yeD14AjfEI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a275",
"articleId": "1yeCVsvyctG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223321",
"title": "Turbulent motions cannot shake VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223321/12OmNCeaPWO",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446461",
"title": "In-Car 6-DoF Mixed Reality for Rear-Seat and Co-Driver Entertainment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446461/13bd1fHrlSc",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699226",
"title": "A Virtual Boarding System of an Autonomous Vehicle for Investigating the Effect of an AR Display on Passenger Comfort",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699226/19F1TgkuQaQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a928",
"title": "[DC] Mixed Reality Interaction for Mobile Knowledge Work",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a928/1CJdRhDCDTO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873972",
"title": "From Shielding to Avoidance: Passenger Augmented Reality and the Layout of Virtual Displays for Productivity in Shared Transit",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873972/1GjwQcFdVyU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a361",
"title": "Pen-based Interaction with Spreadsheets in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a361/1pysxojAVAk",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a767",
"title": "Shared Augmented Reality Experience Between a Microsoft Flight Simulator User and a User in the Real World",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a767/1tnWQgmPIeA",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a528",
"title": "VXSlate: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a528/1tnXg447e7e",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a757",
"title": "Shared Augmented Reality Experience Between a Microsoft Flight Simulator User and a User in the Real World",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a757/1tnXzdM3r0c",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciddt/2020/0367/0/036700a367",
"title": "Research of interactive experience display of child safety seats based on Mixed Reality technology",
"doi": null,
"abstractUrl": "/proceedings-article/iciddt/2020/036700a367/1wutF1SqGty",
"parentPublication": {
"id": "proceedings/iciddt/2020/0367/0",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQDpgn9ks",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00107",
"title": "Designing Augmented Reality Virtual Displays for Productivity Work",
"normalizedTitle": "Designing Augmented Reality Virtual Displays for Productivity Work",
"abstract": "We must consider alternative displays for supporting productivity work in the context of an increasingly work-from-home world. Augmented reality virtual monitors can fulfill these needs by equipping users with large screen real estate, while maintaining portability, cost-effectiveness, and not occupying physical space. However, there are open questions regarding how to design virtual monitors. In my dissertation, I plan to investigate the design of virtual monitors to enhance productivity everywhere. This work comprises a group of design and user study contributions. I conducted a user study to understand the feasibility of virtual monitors and their tradeoffs when compared against physical monitors. I further propose investigating the design of static properties and dynamic behaviors that cannot be achieved through physical monitors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We must consider alternative displays for supporting productivity work in the context of an increasingly work-from-home world. Augmented reality virtual monitors can fulfill these needs by equipping users with large screen real estate, while maintaining portability, cost-effectiveness, and not occupying physical space. However, there are open questions regarding how to design virtual monitors. In my dissertation, I plan to investigate the design of virtual monitors to enhance productivity everywhere. This work comprises a group of design and user study contributions. I conducted a user study to understand the feasibility of virtual monitors and their tradeoffs when compared against physical monitors. I further propose investigating the design of static properties and dynamic behaviors that cannot be achieved through physical monitors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We must consider alternative displays for supporting productivity work in the context of an increasingly work-from-home world. Augmented reality virtual monitors can fulfill these needs by equipping users with large screen real estate, while maintaining portability, cost-effectiveness, and not occupying physical space. However, there are open questions regarding how to design virtual monitors. In my dissertation, I plan to investigate the design of virtual monitors to enhance productivity everywhere. This work comprises a group of design and user study contributions. I conducted a user study to understand the feasibility of virtual monitors and their tradeoffs when compared against physical monitors. I further propose investigating the design of static properties and dynamic behaviors that cannot be achieved through physical monitors.",
"fno": "129800a459",
"keywords": [
"Augmented Reality",
"Computer Displays",
"Productivity Work",
"Augmented Reality Virtual Monitors",
"Cost Effectiveness",
"Physical Space",
"Virtual Monitors",
"Physical Monitors",
"Augmented Reality Virtual Displays",
"Large Screen Real Estate",
"Dynamic Behaviors",
"Static Properties",
"Productivity",
"Usability",
"Task Analysis",
"Monitoring",
"Augmented Reality",
"Guidelines",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality",
"Empirical Studies In Interaction Design"
],
"authors": [
{
"affiliation": "Virginia Tech,Center for Human-Computer Interaction,Department of Computer Science,USA",
"fullName": "Leonardo Pavanatto",
"givenName": "Leonardo",
"surname": "Pavanatto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "459-460",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129800a457",
"articleId": "1yeQWUAFeq4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a461",
"articleId": "1yeQKieLz3i",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icat/2007/3056/0/30560055",
"title": "Volumetric Display for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2007/30560055/12OmNBCqbJu",
"parentPublication": {
"id": "proceedings/icat/2007/3056/0",
"title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a111",
"title": "Designing for Depth Perceptions in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a111/12OmNrMZpBd",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836460",
"title": "An Augmented Reality Guide for Assisting Forklift Operation",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836460/12OmNvwTGFS",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2014/4261/0/4261a053",
"title": "Usability Heuristics for Collaborative Augmented Reality Remote Systems",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2014/4261a053/12OmNyPQ4xE",
"parentPublication": {
"id": "proceedings/svr/2014/4261/0",
"title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2018/5663/0/566301a480",
"title": "Fostering Software Developers' Productivity at Work Through Self-Monitoring and Goal-Setting",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2018/566301a480/13bd1tl2omA",
"parentPublication": {
"id": "proceedings/icse-companion/2018/5663/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873972",
"title": "From Shielding to Avoidance: Passenger Augmented Reality and the Layout of Virtual Displays for Productivity in Shared Transit",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873972/1GjwQcFdVyU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a094",
"title": "Sharing Work Appearance for Improvement in Remote Work Productivity",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a094/1I6RRpPyfBe",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a327",
"title": "Augmented Virtuality Training for Special Education Teachers",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a327/1J7WbAdfchq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a279",
"title": "Enhancing Visitor Experience or Hindering Docent Roles: Attentional Issues in Augmented Reality Supported Installations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a279/1pysvRpTvr2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a759",
"title": "Do we still need physical monitors? An evaluation of the usability of AR virtual monitors for productivity work",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a759/1tuAI6Ij8is",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwLOYSu",
"title": "2017 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB1wkHu",
"doi": "10.1109/CW.2017.48",
"title": "Traversing Social Networks in the Virtual Dance Hall: Visualizing History in VR",
"normalizedTitle": "Traversing Social Networks in the Virtual Dance Hall: Visualizing History in VR",
"abstract": "Digital recreations of historical sites and events are important tools both for academic researchers [6,7] and for public interpretation [7,9]. Current 3D visualization and VR technologies enable these recreations to be increasingly immersive and engaging [10,14]. This poster describes a case study based on a mid-twentieth century Chester dance hall, examining the possibilities and limitations of 3D VR for recreating a public music venue which no longer physically exists, and also for visualizing and analyzing the professional network of musicians who played there, and at many other local venues.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Digital recreations of historical sites and events are important tools both for academic researchers [6,7] and for public interpretation [7,9]. Current 3D visualization and VR technologies enable these recreations to be increasingly immersive and engaging [10,14]. This poster describes a case study based on a mid-twentieth century Chester dance hall, examining the possibilities and limitations of 3D VR for recreating a public music venue which no longer physically exists, and also for visualizing and analyzing the professional network of musicians who played there, and at many other local venues.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Digital recreations of historical sites and events are important tools both for academic researchers [6,7] and for public interpretation [7,9]. Current 3D visualization and VR technologies enable these recreations to be increasingly immersive and engaging [10,14]. This poster describes a case study based on a mid-twentieth century Chester dance hall, examining the possibilities and limitations of 3D VR for recreating a public music venue which no longer physically exists, and also for visualizing and analyzing the professional network of musicians who played there, and at many other local venues.",
"fno": "2089a249",
"keywords": [
"History",
"Music",
"Virtual Reality",
"Social Networks",
"Virtual Dance Hall",
"Digital Recreations",
"Historical Sites",
"VR Technologies",
"Public Music Venue",
"3 D Visualization",
"History Visualization",
"Chester Dance Hall",
"History",
"Visualization",
"Games",
"Three Dimensional Displays",
"Data Visualization",
"Social Network Services",
"Music",
"Digital Humanities",
"Visualization",
"Virtual Reality",
"Digital Audio",
"Heritage In Cyberspace",
"Hidden Histories",
"British Jazz",
"Dance Bands"
],
"authors": [
{
"affiliation": null,
"fullName": "Helen Vera Southall",
"givenName": "Helen Vera",
"surname": "Southall",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lee Beever",
"givenName": "Lee",
"surname": "Beever",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Peter W. S. Butcher",
"givenName": "Peter W. S.",
"surname": "Butcher",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-09-01T00:00:00",
"pubType": "proceedings",
"pages": "249-252",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2089-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2089a245",
"articleId": "12OmNzYeB2p",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2089a253",
"articleId": "12OmNxGALaE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wevr/2017/3881/0/07957714",
"title": "When sound modulates vision: VR applications for art and entertainment",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2017/07957714/12OmNBvkdnd",
"parentPublication": {
"id": "proceedings/wevr/2017/3881/0",
"title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892365",
"title": "Defying the Nazis VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892365/12OmNro0HW7",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446581",
"title": "VR Touch Museum",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446581/13bd1fKQxrI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446510",
"title": "Comparing Interface Affordances for Controlling a Push Broom in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446510/13bd1tMztXW",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192680",
"title": "Interactive Visual Profiling of Musicians",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192680/13rRUwjoNx7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493414",
"title": "Comparison of Teleportation and Fixed Track Driving in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493414/14tNJnrhcIw",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a093",
"title": "Exploring the Design Space for Immersive Embodiment in Dance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a093/1CJc1vWLV6w",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a762",
"title": "Design of a VR Action Observation Tool for Rhythmic Coordination Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a762/1CJdy3RCDvy",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a257",
"title": "An Interactive Chart of Biography",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a257/1cMF7iL2fZu",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a260",
"title": "Influence of hand visualization on tool-based motor skills training in an immersive VR simulator",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a260/1pyswAXnugM",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwwMf3B",
"title": "2016 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)",
"acronym": "vhcie",
"groupId": "1814624",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy2Jt9L",
"doi": "10.1109/VHCIE.2016.7563568",
"title": "Feeling crowded yet?: crowd simulations for VR",
"normalizedTitle": "Feeling crowded yet?: crowd simulations for VR",
"abstract": "With advances in virtual reality technology and its multiple applications, the need for believable, immersive virtual environments is increasing. Even though current computer graphics methods allow us to develop highly realistic virtual worlds, the main element failing to enhance presence is autonomous groups of human inhabitants. A great number of crowd simulation techniques have emerged in the last decade, but critical details in the crowd's movements and appearance do not meet the standards necessary to convince VR participants that they are present in a real crowd. In this paper, we review recent advances in the creation of immersive virtual crowds and discuss areas that require further work to turn these simulations into more fully immersive and believable experiences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With advances in virtual reality technology and its multiple applications, the need for believable, immersive virtual environments is increasing. Even though current computer graphics methods allow us to develop highly realistic virtual worlds, the main element failing to enhance presence is autonomous groups of human inhabitants. A great number of crowd simulation techniques have emerged in the last decade, but critical details in the crowd's movements and appearance do not meet the standards necessary to convince VR participants that they are present in a real crowd. In this paper, we review recent advances in the creation of immersive virtual crowds and discuss areas that require further work to turn these simulations into more fully immersive and believable experiences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With advances in virtual reality technology and its multiple applications, the need for believable, immersive virtual environments is increasing. Even though current computer graphics methods allow us to develop highly realistic virtual worlds, the main element failing to enhance presence is autonomous groups of human inhabitants. A great number of crowd simulation techniques have emerged in the last decade, but critical details in the crowd's movements and appearance do not meet the standards necessary to convince VR participants that they are present in a real crowd. In this paper, we review recent advances in the creation of immersive virtual crowds and discuss areas that require further work to turn these simulations into more fully immersive and believable experiences.",
"fno": "07563568",
"keywords": [
"Solid Modeling",
"Animation",
"Brain Modeling",
"Rendering Computer Graphics",
"Engines",
"Visualization",
"Lighting"
],
"authors": [
{
"affiliation": "Universitat Politecnica de Catalunya",
"fullName": "Nuria Pelechano",
"givenName": "Nuria",
"surname": "Pelechano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "George Mason University",
"fullName": "Jan M. Allbecky",
"givenName": "Jan M.",
"surname": "Allbecky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vhcie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "17-21",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0829-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07563567",
"articleId": "12OmNBhZ4e2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07563569",
"articleId": "12OmNwEJ0N7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciicii/2016/3575/0/3575a368",
"title": "Research on the Impact of Crowd Flow on Crowd Risk in Large Gathering Spots",
"doi": null,
"abstractUrl": "/proceedings-article/iciicii/2016/3575a368/12OmNBDgZ2V",
"parentPublication": {
"id": "proceedings/iciicii/2016/3575/0",
"title": "2016 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802088",
"title": "Simulating crowd interactions in virtual environments (doctoral consortium)",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802088/12OmNwEJ0WC",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000319",
"title": "Crowd simulation in emergency aircraft evacuation using Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000319/12OmNxj23jk",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a048",
"title": "Crowd-Sourced Procedural Animation Optimisation: Comparing Desktop and VR Behaviour",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a048/12OmNyFU77Q",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eurosim/2013/5073/0/5073a151",
"title": "Approaches to Modeling the Emotional Aspects of a Crowd",
"doi": null,
"abstractUrl": "/proceedings-article/eurosim/2013/5073a151/12OmNzYwc5N",
"parentPublication": {
"id": "proceedings/eurosim/2013/5073/0",
"title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2011/05/mcg2011050054",
"title": "Context-Aware Motion Diversification for Crowd Simulation",
"doi": null,
"abstractUrl": "/magazine/cg/2011/05/mcg2011050054/13rRUxjyXcP",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a119",
"title": "Re-enacting Football Matches in VR using Virtual Agents’ Realistic Behaviours",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a119/1KmFbcahv2M",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089573",
"title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users’ Affective and Non-Verbal Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09273221",
"title": "Crowd Navigation in VR: Exploring Haptic Rendering of Collisions",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09273221/1pb9BhAe16o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a057",
"title": "CrowdAR Table An AR system for Real-time Interactive Crowd Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a057/1qpzAVsX4nm",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJf4aHcqoU",
"doi": "10.1109/VRW55335.2022.00149",
"title": "VR Wayfinding Training for People with Visual Impairment using VR Treadmill and VR Tracker",
"normalizedTitle": "VR Wayfinding Training for People with Visual Impairment using VR Treadmill and VR Tracker",
"abstract": "There are virtual reality (VR) wayfinding training systems for people with visual impairment, but there is a lack of studies about how training environments can affect spatial information acquisition of people with visual impairment. Using a VR treadmill and a VR tracker, we studied how walk-in-place and actual walking can affect the acquisition of spatial information with regard to paths and obstacles. Our results show that people with visual impairment remember routes better when trained with VR treadmill, but they remember obstacles better when trained with VR tracker. We evaluate the respective efficacies of these approaches on spatial information memorization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "There are virtual reality (VR) wayfinding training systems for people with visual impairment, but there is a lack of studies about how training environments can affect spatial information acquisition of people with visual impairment. Using a VR treadmill and a VR tracker, we studied how walk-in-place and actual walking can affect the acquisition of spatial information with regard to paths and obstacles. Our results show that people with visual impairment remember routes better when trained with VR treadmill, but they remember obstacles better when trained with VR tracker. We evaluate the respective efficacies of these approaches on spatial information memorization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "There are virtual reality (VR) wayfinding training systems for people with visual impairment, but there is a lack of studies about how training environments can affect spatial information acquisition of people with visual impairment. Using a VR treadmill and a VR tracker, we studied how walk-in-place and actual walking can affect the acquisition of spatial information with regard to paths and obstacles. Our results show that people with visual impairment remember routes better when trained with VR treadmill, but they remember obstacles better when trained with VR tracker. We evaluate the respective efficacies of these approaches on spatial information memorization.",
"fno": "840200a596",
"keywords": [
"Handicapped Aids",
"Interactive Devices",
"Virtual Reality",
"VR Treadmill",
"VR Tracker",
"Spatial Information Memorization",
"VR Wayfinding Training",
"Visual Impairment",
"Wayfinding Training Systems",
"Training Environments",
"Spatial Information Acquisition",
"Training",
"Human Computer Interaction",
"Legged Locomotion",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"Virtual Reality VR",
"People With Visual Impairment",
"Wayfinding",
"Training",
"Human Centered Computing X 007 E Human Computer Interaction HCI X 007 E Empirical Studies In HCI",
"Human Centered Computing X 007 E Human Computer Interaction HCI X 007 E Interactive Systems And Tools"
],
"authors": [
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Sangsun Han",
"givenName": "Sangsun",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Pilhyoun Yoon",
"givenName": "Pilhyoun",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Miyeon Ha",
"givenName": "Miyeon",
"surname": "Ha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Kibum Kim",
"givenName": "Kibum",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "596-597",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a594",
"articleId": "1CJewqWywOk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a598",
"articleId": "1CJdrSGz23S",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icccnt/2017/3038/0/08204152",
"title": "Understanding implication of VR-assisted treadmill walk on gait-related indices",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2017/08204152/12OmNx4Q6zo",
"parentPublication": {
"id": "proceedings/icccnt/2017/3038/0",
"title": "2017 8th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2000/0795/0/07950155",
"title": "WearTrack: A Self-Referenced Head and Hand Tracker for Wearable Computers and Portable VR",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2000/07950155/12OmNyugyRH",
"parentPublication": {
"id": "proceedings/iswc/2000/0795/0",
"title": "Digest of Papers. Fourth International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a732",
"title": "Stay Safe! Safety Precautions for Walking on a Conventional Treadmill in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a732/1CJcCMpD8xa",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09925645",
"title": "Strolling in Room-Scale VR: Hex-Core-MK1 Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09925645/1HCQTWI9XgY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciscet/2022/6044/0/604400a191",
"title": "Research on Treadmill Design based on Human-machine Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/iciscet/2022/604400a191/1HbbWesiBkA",
"parentPublication": {
"id": "proceedings/iciscet/2022/6044/0",
"title": "2022 International Conference on Information System, Computing and Educational Technology (ICISCET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrhciai/2022/9182/0/918200a134",
"title": "MIND-VR: A Utility Approach of Human-Computer Interaction in Virtual Space based on Autonomous Consciousness",
"doi": null,
"abstractUrl": "/proceedings-article/vrhciai/2022/918200a134/1LxffWquCrK",
"parentPublication": {
"id": "proceedings/vrhciai/2022/9182/0",
"title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797842",
"title": "A pilot study of gaze-gait relations analysis in a VR environment using HMD and LRF",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797842/1cJ15kwNxnO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089462",
"title": "VR Bridges: Simulating Smooth Uneven Surfaces in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089462/1jIxeZPD4LS",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089561",
"title": "Real Walking in Place: HEX-CORE-PROTOTYPE Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089561/1jIxfncHjNe",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0ZaKBbJS",
"doi": "10.1109/VR.2019.8797789",
"title": "VR-Replay: Capturing and Replaying Avatars in VR for Asynchronous 3D Collaborative Design",
"normalizedTitle": "VR-Replay: Capturing and Replaying Avatars in VR for Asynchronous 3D Collaborative Design",
"abstract": "Distributed teams rely on asynchronous CMC tools to complete collaborative tasks due to the difficulties and costs surrounding scheduling synchronous communications. In this paper, we present VR-Replay, a new communication tool that records and replays avatars with both nonverbal behavior and verbal communication in VR asynchronous collaboration. We describe a study comparing VR-Replay with a desktop-based CVE with audio annotation and a VR immersive CVE with audio annotation. Our results suggest that viewing the replay avatar in VR-Replay improves teamwork, causing people to view their partners as more likable, warm, and friendly. 75% of the users chose VR-Replay as the preferred communication tool in our study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Distributed teams rely on asynchronous CMC tools to complete collaborative tasks due to the difficulties and costs surrounding scheduling synchronous communications. In this paper, we present VR-Replay, a new communication tool that records and replays avatars with both nonverbal behavior and verbal communication in VR asynchronous collaboration. We describe a study comparing VR-Replay with a desktop-based CVE with audio annotation and a VR immersive CVE with audio annotation. Our results suggest that viewing the replay avatar in VR-Replay improves teamwork, causing people to view their partners as more likable, warm, and friendly. 75% of the users chose VR-Replay as the preferred communication tool in our study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Distributed teams rely on asynchronous CMC tools to complete collaborative tasks due to the difficulties and costs surrounding scheduling synchronous communications. In this paper, we present VR-Replay, a new communication tool that records and replays avatars with both nonverbal behavior and verbal communication in VR asynchronous collaboration. We describe a study comparing VR-Replay with a desktop-based CVE with audio annotation and a VR immersive CVE with audio annotation. Our results suggest that viewing the replay avatar in VR-Replay improves teamwork, causing people to view their partners as more likable, warm, and friendly. 75% of the users chose VR-Replay as the preferred communication tool in our study.",
"fno": "08797789",
"keywords": [
"Avatars",
"Groupware",
"VR Asynchronous Collaboration",
"VR Immersive CVE",
"Replay Avatar",
"Preferred Communication Tool",
"Asynchronous CMC Tools",
"Asynchronous 3 D Collaborative Design",
"VR Replay",
"Avatars",
"Tools",
"Three Dimensional Displays",
"Task Analysis",
"Teamwork",
"Prototypes",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Cornell University",
"fullName": "Cheng Yao Wang",
"givenName": "Cheng Yao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cornell University",
"fullName": "Logan Drumm",
"givenName": "Logan",
"surname": "Drumm",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cornell University",
"fullName": "Christopher Troup",
"givenName": "Christopher",
"surname": "Troup",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cornell University",
"fullName": "Yingjie Ding",
"givenName": "Yingjie",
"surname": "Ding",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cornell University",
"fullName": "Andrea Stevenson Won",
"givenName": "Andrea",
"surname": "Stevenson Won",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1215-1216",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798305",
"articleId": "1cJ0S7AlGtq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798363",
"articleId": "1cJ0JNak6w8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892372",
"title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892372/12OmNz2C1zq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798363",
"title": "RelivelnVR: Capturing and Reliving Virtual Reality Experiences Together",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798363/1cJ0JNak6w8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798169",
"title": "Stand-alone, Wearable System for Full Body VR Avatars: Towards Physics-based 3D Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798169/1cJ126EVaVi",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798100",
"title": "Towards a Framework on Accessible and Social VR in Education",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798100/1cJ16Rutlm0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797971",
"title": "360-Degree Photo-realistic VR Conferencing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797971/1cJ1b26beEg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a555",
"title": "Enhancing Participation Experience in VR Live Concerts by Improving Motions of Virtual Audience Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a555/1pyswu13B4Y",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a065",
"title": "The Embodiment of Photorealistic Avatars Influences Female Body Weight Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a065/1tuAAOZpdoQ",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a040",
"title": "A VR Application for the Virtual Fitting of Fashion Garments on Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a040/1yeQGzhOswM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a139",
"title": "VR Collaboration in Large Companies: An Interview Study on the Role of Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a139/1yeQK6CDe3C",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnX59fALbG",
"doi": "10.1109/VRW52623.2021.00239",
"title": "Exploring Body Gestures for Small Object Selection in Dense Environment in HMD VR for Data Visualization Applications",
"normalizedTitle": "Exploring Body Gestures for Small Object Selection in Dense Environment in HMD VR for Data Visualization Applications",
"abstract": "Recent years have seen incredible growth of Virtual Reality (VR) interfaces. In the area of data visualization and analytics, VR applications offers immense opportunities to interact, modify and explore 3D data in an interactive manner, which help in establishing new trends and patterns. In this regard, object selection is of primary importance. It is the fundamental and initial task in any immersive VR. However, the current literature is limited in investigating the effectiveness of object selection techniques in different VEs including dense and occluded dense VE, varied object sizes, proximity, and distances in the area of Immersive Analytics. In this paper, I present my ongoing PhD research to explore controller-less gestures for nail-size object selection on HMD-VR interface for dense and occluded dense VE. I describe the experiments, the findings and my future studies. I believe the outcome of these experiments in the form of guidelines or framework will enable researchers to design controller-less body gestures for object selection task.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent years have seen incredible growth of Virtual Reality (VR) interfaces. In the area of data visualization and analytics, VR applications offers immense opportunities to interact, modify and explore 3D data in an interactive manner, which help in establishing new trends and patterns. In this regard, object selection is of primary importance. It is the fundamental and initial task in any immersive VR. However, the current literature is limited in investigating the effectiveness of object selection techniques in different VEs including dense and occluded dense VE, varied object sizes, proximity, and distances in the area of Immersive Analytics. In this paper, I present my ongoing PhD research to explore controller-less gestures for nail-size object selection on HMD-VR interface for dense and occluded dense VE. I describe the experiments, the findings and my future studies. I believe the outcome of these experiments in the form of guidelines or framework will enable researchers to design controller-less body gestures for object selection task.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent years have seen incredible growth of Virtual Reality (VR) interfaces. In the area of data visualization and analytics, VR applications offers immense opportunities to interact, modify and explore 3D data in an interactive manner, which help in establishing new trends and patterns. In this regard, object selection is of primary importance. It is the fundamental and initial task in any immersive VR. However, the current literature is limited in investigating the effectiveness of object selection techniques in different VEs including dense and occluded dense VE, varied object sizes, proximity, and distances in the area of Immersive Analytics. In this paper, I present my ongoing PhD research to explore controller-less gestures for nail-size object selection on HMD-VR interface for dense and occluded dense VE. I describe the experiments, the findings and my future studies. I believe the outcome of these experiments in the form of guidelines or framework will enable researchers to design controller-less body gestures for object selection task.",
"fno": "405700a713",
"keywords": [
"Data Visualisation",
"Helmet Mounted Displays",
"Virtual Reality",
"Immersive VR",
"Dense VE Object",
"Immersive Analytics",
"Controller Less Gestures",
"Nail Size Object Selection",
"HMD VR Interface",
"Body Gestures",
"Object Selection Task",
"Dense Environment",
"Data Visualization Applications",
"Virtual Reality Interfaces",
"VR Applications",
"Human Computer Interaction",
"Three Dimensional Displays",
"Conferences",
"Design Methodology",
"Data Visualization",
"Virtual Reality",
"Resists",
"Virtual Reality",
"Object Selection",
"Immersive Analytics",
"Dense And Occluded Virtual Environment",
"Nail Size Objects"
],
"authors": [
{
"affiliation": "Indian Institute of Technology (IIT),Guwahati,India",
"fullName": "Shimmila Bhowmick",
"givenName": "Shimmila",
"surname": "Bhowmick",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "713-714",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a711",
"articleId": "1tnWAy4pkyY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a715",
"articleId": "1tnXsX6EMBa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020015",
"title": "Human-Centered VR Design: Five Essentials Every Engineer Needs to Know",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020015/13rRUx0xPyV",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a683",
"title": "Answering With Bow and Arrow: Questionnaires and VR Blend Without Distorting the Outcome",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a683/1CJbQ0Iu1zO",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a964",
"title": "Mid-air Haptic Texture Exploration in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a964/1CJeOwwf1Nm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a596",
"title": "VR Wayfinding Training for People with Visual Impairment using VR Treadmill and VR Tracker",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a596/1CJf4aHcqoU",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a905",
"title": "Haptics in VR Using Origami-Augmented Drones",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a905/1J7WrPcWIVO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798147",
"title": "[DC] Designing VR for Teamwork: The Influence of HMD VR Communication Capabilities on Teamwork Competencies",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798147/1cJ0HhK5ANW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798238",
"title": "Text Typing in VR Using Smartphones Touchscreen and HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798238/1cJ0Qw94bi8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090496",
"title": "The influence of text rotation, font and distance on legibility in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090496/1jIxn3eRfnq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXkpvZfqg",
"doi": "10.1109/VRW52623.2021.00133",
"title": "Visual Indicators for Monitoring Students in a VR class",
"normalizedTitle": "Visual Indicators for Monitoring Students in a VR class",
"abstract": "Remote classes using VR technology are gaining recognition when in-person meetings are difficult or risky. We designed an immersive VR interface with several visual cues to support teacher awareness of students and their actions, attention, and temperament in a social VR environment. This interface keeps relevant information about students within the teacher’s visual field of attention and has options to reduce the amount of information presented. Pilot study participants preferred to see all student indicators in one place and suggested we minimize the amount of information displayed to focus on the most urgent students.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Remote classes using VR technology are gaining recognition when in-person meetings are difficult or risky. We designed an immersive VR interface with several visual cues to support teacher awareness of students and their actions, attention, and temperament in a social VR environment. This interface keeps relevant information about students within the teacher’s visual field of attention and has options to reduce the amount of information presented. Pilot study participants preferred to see all student indicators in one place and suggested we minimize the amount of information displayed to focus on the most urgent students.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Remote classes using VR technology are gaining recognition when in-person meetings are difficult or risky. We designed an immersive VR interface with several visual cues to support teacher awareness of students and their actions, attention, and temperament in a social VR environment. This interface keeps relevant information about students within the teacher’s visual field of attention and has options to reduce the amount of information presented. Pilot study participants preferred to see all student indicators in one place and suggested we minimize the amount of information displayed to focus on the most urgent students.",
"fno": "405700a502",
"keywords": [
"Computer Aided Instruction",
"Educational Administrative Data Processing",
"Teaching",
"User Interfaces",
"Virtual Reality",
"Student Indicators",
"Visual Indicators",
"Student Monitoring",
"Remote Classes",
"Immersive VR Interface",
"Teacher Awareness",
"Social VR Environment",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"User Interfaces",
"Monitoring",
"Human Centered Computing",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "David M Broussard",
"givenName": "David M",
"surname": "Broussard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "Yitoshee Rahman",
"givenName": "Yitoshee",
"surname": "Rahman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "Arun K Kulshreshth",
"givenName": "Arun K",
"surname": "Kulshreshth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Louisiana at Lafayette,Lafayette,Louisiana,United States",
"fullName": "Christoph W Borst",
"givenName": "Christoph W",
"surname": "Borst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "502-503",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnXjVMmzBe",
"name": "pvrw202140570-09419345s1-mm_405700a502.zip",
"size": "82.5 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419345s1-mm_405700a502.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a500",
"articleId": "1tnWZ1klXdS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a504",
"articleId": "1tnXyTs22BO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08448286",
"title": "Teacher-Guided Educational VR: Assessment of Live and Prerecorded Teachers Guiding Virtual Field Trips",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448286/13bd1eSlyt6",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446312",
"title": "VR-Assisted vs Video-Assisted Teacher Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446312/13bd1eY1x42",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446437",
"title": "Fluid VR: Extended Object Associations for Automatic Mode Switching in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446437/13bd1ftOBCR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446614",
"title": "Using Industrial Robots as Haptic Devices for VR-Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446614/13bd1h03qOq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a146",
"title": "Remapping Control in VR for Patients with AMD",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a146/1MNgUIqIDQc",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798106",
"title": "VR-MOOCs: A Learning Management System for VR Education",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798106/1cJ0Pvi3gwo",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a284",
"title": "An Interface for Enhanced Teacher Awareness of Student Actions and Attention in a VR Classroom",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a284/1tnXZIKSGAM",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a320",
"title": "Collaborative learning in VR for cross-disciplinary distributed student teams",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a320/1tnXyiYiebK",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a342",
"title": "VR multi-class",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a342/1vg7TbTA6ju",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1vg7AGzvxNC",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1vg7TbTA6ju",
"doi": "10.1109/ICVRV51359.2020.00093",
"title": "VR multi-class",
"normalizedTitle": "VR multi-class",
"abstract": "VR multi-dimensional classroom is a complete VR immersive course learning system developed and launched by Zhihai Yuntian. Students can put on a helmet and have an immersive learning experience, while teachers can control the course process through the teacher's side.",
"abstracts": [
{
"abstractType": "Regular",
"content": "VR multi-dimensional classroom is a complete VR immersive course learning system developed and launched by Zhihai Yuntian. Students can put on a helmet and have an immersive learning experience, while teachers can control the course process through the teacher's side.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "VR multi-dimensional classroom is a complete VR immersive course learning system developed and launched by Zhihai Yuntian. Students can put on a helmet and have an immersive learning experience, while teachers can control the course process through the teacher's side.",
"fno": "049700a342",
"keywords": [
"Computer Aided Instruction",
"Educational Courses",
"Teaching",
"Virtual Reality",
"VR Multiclass",
"VR Multidimensional Classroom",
"Complete VR Immersive Course Learning System",
"Zhihai Yuntian",
"Immersive Learning Experience",
"Course Process",
"Learning Systems",
"Visualization",
"Head",
"Process Control",
"Virtual Reality",
"Safety",
"Zhihai Yuntian",
"Virtual Reality",
"Classroom",
"Learning",
"Education"
],
"authors": [
{
"affiliation": "Research and Development Center Qingdao Zhihai Yuntian Information Technology Co., LTD,Qingdao,China",
"fullName": "Yunyang Zhang",
"givenName": "Yunyang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research and Development Center Qingdao Zhihai Yuntian Information Technology Co., LTD,Qingdao,China",
"fullName": "Fan Zhang",
"givenName": "Fan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research and Development Center Qingdao Zhihai Yuntian Information Technology Co., LTD,Qingdao,China",
"fullName": "Yingjie Kong",
"givenName": "Yingjie",
"surname": "Kong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research and Development Center Qingdao Zhihai Yuntian Information Technology Co., LTD,Qingdao,China",
"fullName": "Chengpeng Tang",
"givenName": "Chengpeng",
"surname": "Tang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "342-343",
"year": "2020",
"issn": "2375-141X",
"isbn": "978-1-6654-0497-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1vg7SGlgFQQ",
"name": "picvrv202004970-09479850s1-mm_049700a342.zip",
"size": "96.9 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/picvrv202004970-09479850s1-mm_049700a342.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "049700a340",
"articleId": "1vg7QG06UcE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "049700a344",
"articleId": "1vg8nt81fd6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2002/1784/0/17840318",
"title": "Wandering in VR Environments by Estimating Head Pose Using an Omnicam",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840318/12OmNBtCCJD",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/searis/2012/1249/0/06231166",
"title": "VR JuggLua: A framework for VR applications combining Lua, OpenSceneGraph, and VR Juggler",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2012/06231166/12OmNrkT7GZ",
"parentPublication": {
"id": "proceedings/searis/2012/1249/0",
"title": "2012 5th Workshop on Software Engineering and Architectures for Realtime Interactive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi-t/2010/4233/0/4233a010",
"title": "Multi-projector VR Systems",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi-t/2010/4233a010/12OmNyRg4BE",
"parentPublication": {
"id": "proceedings/sibgrapi-t/2010/4233/0",
"title": "2010 23RD SIBGRAPI - Conference on Graphics, Patterns and Images Tutorials",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446312",
"title": "VR-Assisted vs Video-Assisted Teacher Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446312/13bd1eY1x42",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/02/mcg2008020094",
"title": "Future Standards for Immersive VR: Report on the IEEE Virtual Reality 2007 Workshop",
"doi": null,
"abstractUrl": "/magazine/cg/2008/02/mcg2008020094/13rRUB6SpRX",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020057",
"title": "An Analysis of VR Technology Used in Immersive Simulations with a Serious Game Perspective",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020057/13rRUwh80Nv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a093",
"title": "VR, Deepfakes and Epistemic Security",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a093/1KmFcdUEcb6",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2019/5050/0/505000a059",
"title": "Research on the Artistic Characteristics of VR Films",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2019/505000a059/1fHkc7eWgU0",
"parentPublication": {
"id": "proceedings/icvris/2019/5050/0",
"title": "2019 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a502",
"title": "Visual Indicators for Monitoring Students in a VR class",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a502/1tnXkpvZfqg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a358",
"title": "Integrated Application of BIM and VR Technology in Architectural Interactive Design and Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a358/1vg7Xg6sLLy",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxdm4Is",
"title": "Eighth International Symposium on Wearable Computers",
"acronym": "iswc",
"groupId": "1000810",
"volume": "0",
"displayVolume": "1",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBqv2p0",
"doi": "10.1109/ISWC.2004.19",
"title": "Expert Chording Text Entry on the Twiddler One-Handed Keyboard",
"normalizedTitle": "Expert Chording Text Entry on the Twiddler One-Handed Keyboard",
"abstract": "Previously we demonstrated that after 400 minutes of practice, ten novices averaged over 26 words per minute (wpm) for text entry on the Twiddler one-handed chording keyboard, outperforming the multi-tap mobile text entry standard. Here we present a study that examines expert chording performance. Our five participants achieved an average rate of 47 wpm after approximately 25 hours of practice in varying conditions. One subject achieved a rate of 67 wpm, equivalent to the typing rate of the last author who has been a Twiddler user for ten years. We analyze the effects of learning on various aspects of chording, provide evidence that lack of visual feedback does not hinder expert typing speed and examine the potential use of multi-character chords (MCCs) to increase text entry speed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Previously we demonstrated that after 400 minutes of practice, ten novices averaged over 26 words per minute (wpm) for text entry on the Twiddler one-handed chording keyboard, outperforming the multi-tap mobile text entry standard. Here we present a study that examines expert chording performance. Our five participants achieved an average rate of 47 wpm after approximately 25 hours of practice in varying conditions. One subject achieved a rate of 67 wpm, equivalent to the typing rate of the last author who has been a Twiddler user for ten years. We analyze the effects of learning on various aspects of chording, provide evidence that lack of visual feedback does not hinder expert typing speed and examine the potential use of multi-character chords (MCCs) to increase text entry speed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Previously we demonstrated that after 400 minutes of practice, ten novices averaged over 26 words per minute (wpm) for text entry on the Twiddler one-handed chording keyboard, outperforming the multi-tap mobile text entry standard. Here we present a study that examines expert chording performance. Our five participants achieved an average rate of 47 wpm after approximately 25 hours of practice in varying conditions. One subject achieved a rate of 67 wpm, equivalent to the typing rate of the last author who has been a Twiddler user for ten years. We analyze the effects of learning on various aspects of chording, provide evidence that lack of visual feedback does not hinder expert typing speed and examine the potential use of multi-character chords (MCCs) to increase text entry speed.",
"fno": "21860094",
"keywords": [],
"authors": [
{
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Kent Lyons",
"givenName": "Kent",
"surname": "Lyons",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Daniel Plaisted",
"givenName": "Daniel",
"surname": "Plaisted",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Thad Starner",
"givenName": "Thad",
"surname": "Starner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iswc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-10-01T00:00:00",
"pubType": "proceedings",
"pages": "94-101",
"year": "2004",
"issn": "1530-0811",
"isbn": "0-7695-2186-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "21860085",
"articleId": "12OmNzyGH3C",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "21860102",
"articleId": "12OmNqBbHPe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpads/2014/7615/0/07097812",
"title": "Virtual keyboard for head mounted display-based wearable devices",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097812/12OmNqzu6VX",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2005/2424/0/24240891",
"title": "Four-Key Text Entry Augmented with Color Blinking Feedback for Print-Handicapped People with Ocular Pathology",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2005/24240891/12OmNvjQ95C",
"parentPublication": {
"id": "proceedings/dexa/2005/2424/0",
"title": "16th International Workshop on Database and Expert Systems Applications (DEXA'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09737726",
"title": "MyoKey: Inertial Motion Sensing and Gesture-based QWERTY Keyboard for Extended Realities",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09737726/1BQlEBR0ceY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874256",
"title": "Efficient Flower Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2019/9148/0/08767420",
"title": "HIBEY: Hide the Keyboard in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2019/08767420/1bQzm74HXBm",
"parentPublication": {
"id": "proceedings/percom/2019/9148/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2019/0987/0/08943750",
"title": "Performance Envelopes of Virtual Keyboard Text Input Strategies in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2019/08943750/1grONbj2rYc",
"parentPublication": {
"id": "proceedings/ismar/2019/0987/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/05/09625777",
"title": "AirText: One-Handed Text Entry in the Air for COTS Smartwatches",
"doi": null,
"abstractUrl": "/journal/tm/2023/05/09625777/1yLTpxhX8oU",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1eSlysI",
"doi": "10.1109/VR.2018.8446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"normalizedTitle": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"abstract": "We study the performance and user experience of two popular mainstream text entry devices, desktop keyboards and touchscreen keyboards, for use in Virtual Reality (VR) applications. We discuss the limitations arising from limited visual feedback, and examine the efficiency of different strategies of use. We analyze a total of 24 hours of typing data in VR from 24 participants and find that novice users are able to retain about 60% of their typing speed on a desktop keyboard and about 40-45% of their typing speed on a touchscreen keyboard. We also find no significant learning effects, indicating that users can transfer their typing skills fast into VR. Besides investigating baseline performances, we study the position in which keyboards and hands are rendered in space. We find that this does not adversely affect performance for desktop keyboard typing and results in a performance trade-off for touchscreen keyboard typing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We study the performance and user experience of two popular mainstream text entry devices, desktop keyboards and touchscreen keyboards, for use in Virtual Reality (VR) applications. We discuss the limitations arising from limited visual feedback, and examine the efficiency of different strategies of use. We analyze a total of 24 hours of typing data in VR from 24 participants and find that novice users are able to retain about 60% of their typing speed on a desktop keyboard and about 40-45% of their typing speed on a touchscreen keyboard. We also find no significant learning effects, indicating that users can transfer their typing skills fast into VR. Besides investigating baseline performances, we study the position in which keyboards and hands are rendered in space. We find that this does not adversely affect performance for desktop keyboard typing and results in a performance trade-off for touchscreen keyboard typing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We study the performance and user experience of two popular mainstream text entry devices, desktop keyboards and touchscreen keyboards, for use in Virtual Reality (VR) applications. We discuss the limitations arising from limited visual feedback, and examine the efficiency of different strategies of use. We analyze a total of 24 hours of typing data in VR from 24 participants and find that novice users are able to retain about 60% of their typing speed on a desktop keyboard and about 40-45% of their typing speed on a touchscreen keyboard. We also find no significant learning effects, indicating that users can transfer their typing skills fast into VR. Besides investigating baseline performances, we study the position in which keyboards and hands are rendered in space. We find that this does not adversely affect performance for desktop keyboard typing and results in a performance trade-off for touchscreen keyboard typing.",
"fno": "08446059",
"keywords": [
"Helmet Mounted Displays",
"Keyboards",
"Learning Artificial Intelligence",
"Touch Sensitive Screens",
"User Interfaces",
"Virtual Reality",
"Immersive Head Mounted Display Based Virtual Reality",
"Standard Keyboards",
"User Experience",
"Popular Mainstream Text Entry Devices",
"Virtual Reality Applications",
"VR",
"Novice Users",
"Typing Speed",
"Typing Skills",
"Baseline Performances",
"Desktop Keyboard Typing",
"Touchscreen Keyboard Typing",
"Keyboards",
"Visualization",
"Electronic Mail",
"Virtual Reality",
"Error Analysis",
"Performance Evaluation",
"User Interfaces",
"H 5 2 User Interfaces Input Devices And Strategies"
],
"authors": [
{
"affiliation": "Coburg University of Applied Sciences and Arts",
"fullName": "Jens Grubert",
"givenName": "Jens",
"surname": "Grubert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Passau",
"fullName": "Lukas Witzani",
"givenName": "Lukas",
"surname": "Witzani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research",
"fullName": "Eyal Ofek",
"givenName": "Eyal",
"surname": "Ofek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research",
"fullName": "Michel Pahud",
"givenName": "Michel",
"surname": "Pahud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Passau",
"fullName": "Matthias Kranz",
"givenName": "Matthias",
"surname": "Kranz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Cambridge",
"fullName": "Per Ola Kristensson",
"givenName": "Per Ola",
"surname": "Kristensson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "159-166",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446250",
"articleId": "13bd1eTtWYT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446217",
"articleId": "13bd1AITn9W",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iswc/2005/2419/0/24190170",
"title": "The Impacts of Limited Visual Feedback on Mobile Text Entry for the Twiddler and Mini-QWERTY Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2005/24190170/12OmNqG0SQN",
"parentPublication": {
"id": "proceedings/iswc/2005/2419/0",
"title": "Ninth IEEE International Symposium on Wearable Computers (ISWC'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2017/4338/0/07917636",
"title": "Preventing shoulder surfing using randomized augmented reality keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2017/07917636/19wAJpRnCE0",
"parentPublication": {
"id": "proceedings/percom-workshops/2017/4338/0",
"title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049665",
"title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049665/1KYooqYQbF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798238",
"title": "Text Typing in VR Using Smartphones Touchscreen and HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798238/1cJ0Qw94bi8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797740",
"title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794572",
"title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1UXTzDos",
"doi": "10.1109/ISMAR-Adjunct.2018.00051",
"title": "HiKeyb: High-Efficiency Mixed Reality System for Text Entry",
"normalizedTitle": "HiKeyb: High-Efficiency Mixed Reality System for Text Entry",
"abstract": "Text entry is an imperative issue to be addressed in current entry systems for virtual environments (VEs). The entry method using a physical keyboard is still the most dominant choice for an efficient interaction regarding text entry. In this paper, we propose a typing system with a style of mixed reality, which is called HiKeyb, and it possesses a similar high-efficiency with the single physical keyboard in the real environment. The HiKeyb system consists of a depth camera, a pose tracking module, a head-mounted display (HMD), a QWERTY keyboard and a black table mat. This system can guarantee the entry efficiency and the amenity by not only introducing the force feedback from a movable physical keyboard, but also improving the immersion with the real hand image. In addition, the infrared absorption material helps improve the robustness of the system against different lighting environments. Experiments have proved that users wearing HMDs in Virtual Phrases session can achieve an entry rate of 23.1 words per minute and an error rate of 2.76%, and the rate ratio of virtual reality to real world is 78% when typing phrases. Besides, we find that the proposed system can provide a relatively close entry efficiency to that using a pure physical keyboard in the real environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Text entry is an imperative issue to be addressed in current entry systems for virtual environments (VEs). The entry method using a physical keyboard is still the most dominant choice for an efficient interaction regarding text entry. In this paper, we propose a typing system with a style of mixed reality, which is called HiKeyb, and it possesses a similar high-efficiency with the single physical keyboard in the real environment. The HiKeyb system consists of a depth camera, a pose tracking module, a head-mounted display (HMD), a QWERTY keyboard and a black table mat. This system can guarantee the entry efficiency and the amenity by not only introducing the force feedback from a movable physical keyboard, but also improving the immersion with the real hand image. In addition, the infrared absorption material helps improve the robustness of the system against different lighting environments. Experiments have proved that users wearing HMDs in Virtual Phrases session can achieve an entry rate of 23.1 words per minute and an error rate of 2.76%, and the rate ratio of virtual reality to real world is 78% when typing phrases. Besides, we find that the proposed system can provide a relatively close entry efficiency to that using a pure physical keyboard in the real environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Text entry is an imperative issue to be addressed in current entry systems for virtual environments (VEs). The entry method using a physical keyboard is still the most dominant choice for an efficient interaction regarding text entry. In this paper, we propose a typing system with a style of mixed reality, which is called HiKeyb, and it possesses a similar high-efficiency with the single physical keyboard in the real environment. The HiKeyb system consists of a depth camera, a pose tracking module, a head-mounted display (HMD), a QWERTY keyboard and a black table mat. This system can guarantee the entry efficiency and the amenity by not only introducing the force feedback from a movable physical keyboard, but also improving the immersion with the real hand image. In addition, the infrared absorption material helps improve the robustness of the system against different lighting environments. Experiments have proved that users wearing HMDs in Virtual Phrases session can achieve an entry rate of 23.1 words per minute and an error rate of 2.76%, and the rate ratio of virtual reality to real world is 78% when typing phrases. Besides, we find that the proposed system can provide a relatively close entry efficiency to that using a pure physical keyboard in the real environment.",
"fno": "08699306",
"keywords": [
"Helmet Mounted Displays",
"Keyboards",
"Virtual Reality",
"High Efficiency Mixed Reality System",
"Text Entry",
"Virtual Environments",
"Typing System",
"Hi Keyb System",
"QWERTY Keyboard",
"Movable Physical Keyboard",
"Depth Camera",
"Pose Tracking Module",
"Head Mounted Display",
"Force Feedback",
"Infrared Absorption Material",
"Augmented Reality",
"Human Centered Computing — Human Computer Interaction — Interaction Paradigms — Virtual Reality",
"Human Centered Computing — Human Computer Interaction — HCI Design And Evaluation Methods — Usability Testing"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Haiyan Jiang",
"givenName": "Haiyan",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Dongdong Weng",
"givenName": "Dongdong",
"surname": "Weng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Zhenliang Zhang",
"givenName": "Zhenliang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "AICFVE of Beijing Film Academy",
"fullName": "Yihua Bao",
"givenName": "Yihua",
"surname": "Bao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Science and Technology on Complex Land Systems Simulation Laboratory",
"fullName": "Yufei Jia",
"givenName": "Yufei",
"surname": "Jia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Mengman Nie",
"givenName": "Mengman",
"surname": "Nie",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "132-137",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699262",
"articleId": "19F1NBUYmEU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699196",
"articleId": "19F1S9YzGbC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446250",
"title": "Effects of Hand Representations for Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2017/4338/0/07917636",
"title": "Preventing shoulder surfing using randomized augmented reality keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2017/07917636/19wAJpRnCE0",
"parentPublication": {
"id": "proceedings/percom-workshops/2017/4338/0",
"title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874256",
"title": "Efficient Flower Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a008",
"title": "Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a008/1JrR2KZbVXq",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049695",
"title": "CrowbarLimbs: A Fatigue-Reducing Virtual Reality Text Entry Metaphor",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049695/1KYowtn3pok",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a387",
"title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090425",
"title": "Using Augmented Reality to Assist Seated Office Workers’ Data Entry Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090425/1jIxnbCAMxy",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1cJDgPXq",
"doi": "10.1109/VR.2019.8797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"normalizedTitle": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"abstract": "In the context of immersive VR Head-Mounted Displays, physical keyboards have been proven to be an efficient typing interface. However, text entry using physical keyboards typically requires external camera-based tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself, but have not been utilized for VR. We propose to utilize touch-sensitive physical keyboards for text entry as an alternative sensing mechanism for tracking user's fingertips and present a first prototype for VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the context of immersive VR Head-Mounted Displays, physical keyboards have been proven to be an efficient typing interface. However, text entry using physical keyboards typically requires external camera-based tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself, but have not been utilized for VR. We propose to utilize touch-sensitive physical keyboards for text entry as an alternative sensing mechanism for tracking user's fingertips and present a first prototype for VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the context of immersive VR Head-Mounted Displays, physical keyboards have been proven to be an efficient typing interface. However, text entry using physical keyboards typically requires external camera-based tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself, but have not been utilized for VR. We propose to utilize touch-sensitive physical keyboards for text entry as an alternative sensing mechanism for tracking user's fingertips and present a first prototype for VR.",
"fno": "08797754",
"keywords": [
"Cameras",
"Helmet Mounted Displays",
"Image Sensors",
"Keyboards",
"Touch Sensitive Screens",
"User Interfaces",
"Virtual Reality",
"Capacitive Sensing Physical Keyboard",
"VR Text Entry",
"Touch Sensitive Physical Keyboards",
"Typing Interface",
"Immersive VR Head Mounted Displays",
"External Camera Based Tracking Systems",
"On Surface Interaction",
"User Fingertip Tracking",
"Keyboards",
"Sensors",
"Virtual Reality",
"Pins",
"User Interfaces",
"Standards",
"Conferences",
"Text Entry",
"Touch",
"Physical Keyboards",
"Virtual Reality",
"Capacitive Sensing",
"H.5.2 User Interfaces: Input Devices and Strategies"
],
"authors": [
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Tim Menzner",
"givenName": "Tim",
"surname": "Menzner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Alexander Otte",
"givenName": "Alexander",
"surname": "Otte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Travis Gesslein",
"givenName": "Travis",
"surname": "Gesslein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Jens Grubert",
"givenName": "Jens",
"surname": "Grubert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Philipp Gagel",
"givenName": "Philipp",
"surname": "Gagel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Daniel Schneider",
"givenName": "Daniel",
"surname": "Schneider",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1080-1081",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797910",
"articleId": "1cJ0HuNVaU0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798009",
"articleId": "1cJ1a4b7cfS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446250",
"title": "Effects of Hand Representations for Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2018/2666/1/266601a339",
"title": "A Japanese Software Keyboard for Tablets that Reduces User Fatigue",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2018/266601a339/144U9b07hJP",
"parentPublication": {
"id": "proceedings/compsac/2018/2666/1",
"title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a884",
"title": "Studying the Effect of Physical Realism on Time Perception in a HAZMAT VR Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a884/1CJeHh7xkYw",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a140",
"title": "Direct Interaction Word-Gesture Text Input in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a140/1KmF8k8WXi8",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2019/9148/0/08767420",
"title": "HIBEY: Hide the Keyboard in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2019/08767420/1bQzm74HXBm",
"parentPublication": {
"id": "proceedings/percom/2019/9148/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797740",
"title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794572",
"title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a387",
"title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXRpzMoSY",
"doi": "10.1109/VRW52623.2021.00147",
"title": "2-Thumbs Typing: A Novel Bimanual Text Entry Method in Virtual Reality Environments",
"normalizedTitle": "2-Thumbs Typing: A Novel Bimanual Text Entry Method in Virtual Reality Environments",
"abstract": "We propose a new technique named 2-Thumbs Typing (2TT) enabling text entry with a touchpad in HTC VIVE controller using two thumbs. 2TT method works similarly with bimanual handwriting input but using new designed uni-stroke gestures considering only strokes’ direction. We first design a set of gestures and improve them to finish the final design by a preliminary study through memory, performance efficiency and ease of use. The initial results show that the 2TT technique is easy and comfortable to use, no additional equipment required and supporting eyes-free entry. 2TT can reach 8.5 words per minute with extensive training.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new technique named 2-Thumbs Typing (2TT) enabling text entry with a touchpad in HTC VIVE controller using two thumbs. 2TT method works similarly with bimanual handwriting input but using new designed uni-stroke gestures considering only strokes’ direction. We first design a set of gestures and improve them to finish the final design by a preliminary study through memory, performance efficiency and ease of use. The initial results show that the 2TT technique is easy and comfortable to use, no additional equipment required and supporting eyes-free entry. 2TT can reach 8.5 words per minute with extensive training.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new technique named 2-Thumbs Typing (2TT) enabling text entry with a touchpad in HTC VIVE controller using two thumbs. 2TT method works similarly with bimanual handwriting input but using new designed uni-stroke gestures considering only strokes’ direction. We first design a set of gestures and improve them to finish the final design by a preliminary study through memory, performance efficiency and ease of use. The initial results show that the 2TT technique is easy and comfortable to use, no additional equipment required and supporting eyes-free entry. 2TT can reach 8.5 words per minute with extensive training.",
"fno": "405700a530",
"keywords": [
"Human Computer Interaction",
"Interactive Devices",
"Text Analysis",
"User Interfaces",
"Virtual Reality",
"2 Thumbs Typing",
"Bimanual Text Entry Method",
"Virtual Reality Environments",
"HTC VIVE Controller",
"2 TT Method",
"Uni Stroke Gestures",
"Training",
"Three Dimensional Displays",
"Conferences",
"Memory Management",
"Thumb",
"Virtual Reality",
"User Interfaces",
"Virtual Reality",
"Text Input",
"Uni Stroke Gestures",
"Bimanual Input"
],
"authors": [
{
"affiliation": "Jilin University,College of Software,Changchun,China,130022",
"fullName": "Zigang Zhang",
"givenName": "Zigang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jilin University,Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education,Changchun,China,130022",
"fullName": "Minghui Sun",
"givenName": "Minghui",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jinan University,College of Information Science and Technology/Cyber Security,Guangzhou,China,510000",
"fullName": "BoYu Gao",
"givenName": "BoYu",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jilin University,Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education,Changchun,China,130022",
"fullName": "Limin Wang",
"givenName": "Limin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "530-531",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a528",
"articleId": "1tnXg447e7e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a532",
"articleId": "1tnXy7NpnGg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/aina/2006/2466/2/246620655",
"title": "Design and Implementation for A Bimanual Input System on Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2006/246620655/12OmNAObbBk",
"parentPublication": {
"id": "proceedings/aina/2006/2466/2",
"title": "20th International Conference on Advanced Information Networking and Applications - Volume 1 (AINA'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2016/0662/0/0662a358",
"title": "Mobile Augmented Reality Authoring Tool",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2016/0662a358/12OmNAXglVC",
"parentPublication": {
"id": "proceedings/icsc/2016/0662/0",
"title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dcve/2014/5217/0/07160929",
"title": "From 3d bimanual toward distant collaborative interaction techniques: an awareness issue",
"doi": null,
"abstractUrl": "/proceedings-article/3dcve/2014/07160929/12OmNAYoKot",
"parentPublication": {
"id": "proceedings/3dcve/2014/5217/0",
"title": "2014 International Workshop on Collaborative Virtual Environments (3DCVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a228",
"title": "CUDA-Based Real-Time Bimanual Gestures Interaction with the Rhesus Macaque Brain MRI Data",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a228/12OmNCm7Bwg",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504707",
"title": "Depth-based 3D gesture multi-level radial menu for virtual object manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504707/12OmNx3HI96",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446320",
"title": "An Evaluation of Bimanual Gestures on the Microsoft HoloLens",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446320/13bd1f3HvEy",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797974",
"title": "Remapped Physical-Virtual Interfaces with Bimanual Haptic Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797974/1cJ0NcRFX5m",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089533",
"title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089533/1jIx7JtSOTC",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrMHOd6",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBh8gXp",
"doi": "10.1109/HICSS.2016.264",
"title": "Behavioral Manifestations of Intercultural Competence in Computer-Mediated Intercultural Learning",
"normalizedTitle": "Behavioral Manifestations of Intercultural Competence in Computer-Mediated Intercultural Learning",
"abstract": "Online learning has led to an increase in globally distributed students and an increase in intercultural interactions. Within this setting, intercultural communication competence (ICC) is an important factor because easy connection does not necessarily guarantee effective communication. This paper looks at lessons learned from an online intercultural exchange of Thai and U. S. students regarding behavioral manifestations of ICC in online environments and the influence that culture has on these manifestations. Implications are provided for designing cultural learning exchanges, and helping students develop ICC through improved assessments of ICC in online learning environments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Online learning has led to an increase in globally distributed students and an increase in intercultural interactions. Within this setting, intercultural communication competence (ICC) is an important factor because easy connection does not necessarily guarantee effective communication. This paper looks at lessons learned from an online intercultural exchange of Thai and U. S. students regarding behavioral manifestations of ICC in online environments and the influence that culture has on these manifestations. Implications are provided for designing cultural learning exchanges, and helping students develop ICC through improved assessments of ICC in online learning environments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Online learning has led to an increase in globally distributed students and an increase in intercultural interactions. Within this setting, intercultural communication competence (ICC) is an important factor because easy connection does not necessarily guarantee effective communication. This paper looks at lessons learned from an online intercultural exchange of Thai and U. S. students regarding behavioral manifestations of ICC in online environments and the influence that culture has on these manifestations. Implications are provided for designing cultural learning exchanges, and helping students develop ICC through improved assessments of ICC in online learning environments.",
"fno": "5670c085",
"keywords": [
"Cultural Differences",
"Facebook",
"Encoding",
"Global Communication",
"Reflection",
"Electronic Mail",
"Atmospheric Measurements",
"Behavior",
"Intercultural Communication Competence",
"Online Learning"
],
"authors": [
{
"affiliation": null,
"fullName": "Lisa Mei-ling Chuang",
"givenName": "Lisa Mei-ling",
"surname": "Chuang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel D. Suthers",
"givenName": "Daniel D.",
"surname": "Suthers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2016-01-01T00:00:00",
"pubType": "proceedings",
"pages": "2085-2094",
"year": "2016",
"issn": "1530-1605",
"isbn": "978-0-7695-5670-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5670c084",
"articleId": "12OmNBEpnE4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5670c095",
"articleId": "12OmNBC8Azw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2017/5920/0/08190698",
"title": "Cultivating global mindsets without leaving campus: Building interculturally competent engineer",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190698/12OmNxwENkn",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659209",
"title": "Facilitating Intercultural Development: Preparing Future Engineers for Multidisciplinary Teams and Multicultural Environments",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659209/18j98GoGIog",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900a282",
"title": "A Case Study on Cultivation of Intercultural Communication Competent: An Approach of Collaborative Blended teaching in EFL Classroom",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900a282/1C8GDwa4DBK",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a227",
"title": "Effects of Technology-Supported Cross-cultural Communications on Learners’ Culture and Communication Competences",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a227/1FUUl1Sv99C",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873978",
"title": "Use Virtual Reality to Enhance Intercultural Sensitivity: A Randomised Parallel Longitudinal Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873978/1GjwOm9uWbe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cste/2022/8188/0/818800a055",
"title": "Impact of Instructors’ Content Knowledge on Their Online Intercultural English Teaching Instructional Beliefs",
"doi": null,
"abstractUrl": "/proceedings-article/cste/2022/818800a055/1J7W6eZUNa0",
"parentPublication": {
"id": "proceedings/cste/2022/8188/0",
"title": "2022 4th International Conference on Computer Science and Technologies in Education (CSTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2019/5094/0/509400a074",
"title": "An Empirical Study on the Relationship between Using Motivation of Internet Language and Intercultural Communication Competence of College Students",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2019/509400a074/1gjRGK2tjdm",
"parentPublication": {
"id": "proceedings/mlbdbi/2019/5094/0",
"title": "2019 International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274093",
"title": "A KCI Approach to Promote Intercultural Competencies for International Virutal Engineering Student Teams (InVEST)",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274093/1phRKcJmIQo",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeim/2020/9623/0/962300a150",
"title": "A Study on the Practical Teaching of Intercultural Communication Competence Based on the International Development of Higher Vocational College",
"doi": null,
"abstractUrl": "/proceedings-article/icmeim/2020/962300a150/1syveFmiYdW",
"parentPublication": {
"id": "proceedings/icmeim/2020/9623/0",
"title": "2020 International Conference on Modern Education and Information Management (ICMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieit/2021/2563/0/256300a613",
"title": "Study of Intercultural Communication Training in Interpreting Teaching Based on Multimedia Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ieit/2021/256300a613/1wHKoq6Hrwc",
"parentPublication": {
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNsbGvCQ",
"title": "2016 30th International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"acronym": "waina",
"groupId": "1001766",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNButpWc",
"doi": "10.1109/WAINA.2016.115",
"title": "Socio-Cultural Adaptation Approach to Enhance Intercultural Collaboration and Learning",
"normalizedTitle": "Socio-Cultural Adaptation Approach to Enhance Intercultural Collaboration and Learning",
"abstract": "Although the growing interest in the development of Computer Supported Collaborative Learning (CSCL) environments, major existing systems ignore the variety of learners and their socio-cultural differences, especially in the case of distant learning. Usually, only one and common system is provided to all users whatever their cultural backgrounds are. More than ever, it becomes challenging issue to provide socio-cultural aware collaborative environments able to support intercultural groups in their learning or working activities. Since each learner has her(his) specific socio-cultural values and behaviors, the groupware should adapt to each member's resulting preferences. The main goal of this paper is to present a novel socio-cultural adaptation approach based on and guided by ontologies to adapt CSCL environments to the socio-cultural profiles of its effective users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although the growing interest in the development of Computer Supported Collaborative Learning (CSCL) environments, major existing systems ignore the variety of learners and their socio-cultural differences, especially in the case of distant learning. Usually, only one and common system is provided to all users whatever their cultural backgrounds are. More than ever, it becomes challenging issue to provide socio-cultural aware collaborative environments able to support intercultural groups in their learning or working activities. Since each learner has her(his) specific socio-cultural values and behaviors, the groupware should adapt to each member's resulting preferences. The main goal of this paper is to present a novel socio-cultural adaptation approach based on and guided by ontologies to adapt CSCL environments to the socio-cultural profiles of its effective users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although the growing interest in the development of Computer Supported Collaborative Learning (CSCL) environments, major existing systems ignore the variety of learners and their socio-cultural differences, especially in the case of distant learning. Usually, only one and common system is provided to all users whatever their cultural backgrounds are. More than ever, it becomes challenging issue to provide socio-cultural aware collaborative environments able to support intercultural groups in their learning or working activities. Since each learner has her(his) specific socio-cultural values and behaviors, the groupware should adapt to each member's resulting preferences. The main goal of this paper is to present a novel socio-cultural adaptation approach based on and guided by ontologies to adapt CSCL environments to the socio-cultural profiles of its effective users.",
"fno": "2461a171",
"keywords": [
"Computer Aided Instruction",
"Distance Learning",
"Groupware",
"Ontologies Artificial Intelligence",
"Social Aspects Of Automation",
"Socio Cultural Adaptation Approach",
"Intercultural Collaboration",
"Computer Supported Collaborative Learning",
"Socio Cultural Differences",
"Distant Learning",
"Cultural Backgrounds",
"Socio Cultural Aware Collaborative Environments",
"Intercultural Groups",
"Learning Activities",
"Working Activities",
"Socio Cultural Values",
"Socio Cultural Behaviors",
"Groupware",
"Ontologies",
"CSCL Environments",
"Socio Cultural Profiles",
"Ontologies",
"Collaboration",
"Cultural Differences",
"Collaborative Work",
"Computers",
"Cognition",
"Bibliographies",
"CSCL",
"Socio Cultural Profile",
"Adaptation",
"Ontology"
],
"authors": [
{
"affiliation": "Ecole Nat. des Sci. de l'Inf., Univ. de la Manouba, Manouba, Tunisia",
"fullName": "Fadoua Ouamani",
"givenName": "Fadoua",
"surname": "Ouamani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ecole Nat. des Sci. de l'Inf., Universite de la Manouba, Manouba, Tunisia",
"fullName": "Narjes Bellamine Ben Saoud",
"givenName": "Narjes",
"surname": "Bellamine Ben Saoud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ecole Nat. des Sci. de l'Inf., Univ. de la Manouba, Manouba, Tunisia",
"fullName": "Henda Hajjami Ben Ghezala",
"givenName": "Henda",
"surname": "Hajjami Ben Ghezala",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "waina",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "171-176",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2461-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2461a165",
"articleId": "12OmNzw8j3j",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2461a177",
"articleId": "12OmNxFsmrB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bcgin/2012/4854/0/4854a343",
"title": "Differences between Sino- American Cultural Values from Intercultural Communication Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/bcgin/2012/4854a343/12OmNqAU6DA",
"parentPublication": {
"id": "proceedings/bcgin/2012/4854/0",
"title": "2012 Second International Conference on Business Computing and Global Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2001/0981/0/00926191",
"title": "On inter-organizational EC collaboration-the impact of inter-cultural communication apprehension",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2001/00926191/12OmNvvLi3F",
"parentPublication": {
"id": "proceedings/hicss/2001/0981/2",
"title": "Proceedings of the 34th Annual Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349474",
"title": "Emotion and music: A view from the cultural psychology of music",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349474/12OmNwtEEDf",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2014/7100/0/07073234",
"title": "Operationalization of an ontology based sociocultural adaptation approach and its application to CSCL",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2014/07073234/12OmNxZTtIk",
"parentPublication": {
"id": "proceedings/aiccsa/2014/7100/0",
"title": "2014 IEEE/ACS 11th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/passat-socialcom/2011/1931/0/06113177",
"title": "Towards a Generic Socio-cultural Profile for Collaborative Environments",
"doi": null,
"abstractUrl": "/proceedings-article/passat-socialcom/2011/06113177/12OmNy4r3QD",
"parentPublication": {
"id": "proceedings/passat-socialcom/2011/1931/0",
"title": "2011 IEEE Third Int'l Conference on Privacy, Security, Risk and Trust (PASSAT) / 2011 IEEE Third Int'l Conference on Social Computing (SocialCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wetice/2014/4249/0/4249a235",
"title": "Track Report of Collaboration Tools for Preservation of Environment and Cultural Heritage (COPECH 2014)",
"doi": null,
"abstractUrl": "/proceedings-article/wetice/2014/4249a235/12OmNyO8tQp",
"parentPublication": {
"id": "proceedings/wetice/2014/4249/0",
"title": "2014 IEEE 23rd International Workshops on Enabling Technologies: Infrastructures for Collaborative Enterprise (WETICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/circus/2009/4097/0/05457325",
"title": "Collaboration and Intercultural Issues on Requirements:Communication, Understanding and Softskills (CIRCUS)",
"doi": null,
"abstractUrl": "/proceedings-article/circus/2009/05457325/12OmNylboxe",
"parentPublication": {
"id": "proceedings/circus/2009/4097/0",
"title": "Requirements: Communication, Understanding and Softskills, Collaboration and Intercultural Issues on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2012/4702/0/4702a388",
"title": "Is It Adequate to Model the Socio-cultural Dimension of E-learners by Informing a Fixed Set of Personal Criteria?",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a388/12OmNyuPLmw",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a308",
"title": "Indicators of Country Similarity in Terms of Music Taste, Cultural, and Socio-economic Factors",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a308/12OmNzTYBNX",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/06/09205611",
"title": "Cartographic Design of Cultural Maps",
"doi": null,
"abstractUrl": "/magazine/cg/2020/06/09205611/1nnSRMhXKMM",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNApcu9n",
"title": "2012 Second International Conference on Business Computing and Global Informatization",
"acronym": "bcgin",
"groupId": "1800481",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqAU6DA",
"doi": "10.1109/BCGIN.2012.95",
"title": "Differences between Sino- American Cultural Values from Intercultural Communication Perspective",
"normalizedTitle": "Differences between Sino- American Cultural Values from Intercultural Communication Perspective",
"abstract": "With the globalization, intercultural communication has become inevitable experience for Chinese people who are embracing people all over the world in her strive for modernization and internationalization. Cultural differences have become great obstacles to intercultural communication. Cultural value is the core of shared perceptions, existing only in the minds of people, which provides a norm of behaving, thinking, recognizing, socializing, deducing, evaluating, and make people orient themselves to it. On the basis of the framework of value orientations by Kluckhohn and Stodtbeck, this paper attempts to make a contrastive analysis of differences of Sino-American cultural values, provides insights into the virtues bases of Sino-American cultural values, i.e., Chinese five cardinal virtues and Western four cardinal virtues, explores the influences of cardinal virtues on value dimensions by Hofstede, proposes implications to intercultural communication in order to overcome potential barriers caused by cultural differences. As a qualitative study, this paper adopts the methods of consulting relative materials and makes a comparative analysis in order to promote mutual understanding between Chinese and Americans.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the globalization, intercultural communication has become inevitable experience for Chinese people who are embracing people all over the world in her strive for modernization and internationalization. Cultural differences have become great obstacles to intercultural communication. Cultural value is the core of shared perceptions, existing only in the minds of people, which provides a norm of behaving, thinking, recognizing, socializing, deducing, evaluating, and make people orient themselves to it. On the basis of the framework of value orientations by Kluckhohn and Stodtbeck, this paper attempts to make a contrastive analysis of differences of Sino-American cultural values, provides insights into the virtues bases of Sino-American cultural values, i.e., Chinese five cardinal virtues and Western four cardinal virtues, explores the influences of cardinal virtues on value dimensions by Hofstede, proposes implications to intercultural communication in order to overcome potential barriers caused by cultural differences. As a qualitative study, this paper adopts the methods of consulting relative materials and makes a comparative analysis in order to promote mutual understanding between Chinese and Americans.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the globalization, intercultural communication has become inevitable experience for Chinese people who are embracing people all over the world in her strive for modernization and internationalization. Cultural differences have become great obstacles to intercultural communication. Cultural value is the core of shared perceptions, existing only in the minds of people, which provides a norm of behaving, thinking, recognizing, socializing, deducing, evaluating, and make people orient themselves to it. On the basis of the framework of value orientations by Kluckhohn and Stodtbeck, this paper attempts to make a contrastive analysis of differences of Sino-American cultural values, provides insights into the virtues bases of Sino-American cultural values, i.e., Chinese five cardinal virtues and Western four cardinal virtues, explores the influences of cardinal virtues on value dimensions by Hofstede, proposes implications to intercultural communication in order to overcome potential barriers caused by cultural differences. As a qualitative study, this paper adopts the methods of consulting relative materials and makes a comparative analysis in order to promote mutual understanding between Chinese and Americans.",
"fno": "4854a343",
"keywords": [
"Cultural Differences",
"Global Communication",
"Humans",
"Education",
"Uncertainty",
"Presses",
"Ethics",
"Virtue",
"Intercultural Communication",
"Cultural Value"
],
"authors": [
{
"affiliation": null,
"fullName": "Wang Ying",
"givenName": "Wang",
"surname": "Ying",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bcgin",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-10-01T00:00:00",
"pubType": "proceedings",
"pages": "343-346",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-4469-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4854a339",
"articleId": "12OmNCdk2Am",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4854a347",
"articleId": "12OmNBInLn6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccis/2010/4270/0/4270a908",
"title": "An Ontology Evolution Model Supporting Intercultural Collaboration Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a908/12OmNAQJzJ9",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eeee/2009/3907/0/3907a271",
"title": "Intercultural Understanding of E-Learning: Case Study of eChina~UK Program",
"doi": null,
"abstractUrl": "/proceedings-article/eeee/2009/3907a271/12OmNAoUTqs",
"parentPublication": {
"id": "proceedings/eeee/2009/3907/0",
"title": "2009 International Conference on E-Learning, E-Business, Enterprise Information Systems, and E-Government (EEEE 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciii/2011/4523/2/4523b121",
"title": "Intercultural Communication Competence Training through Network Platform and Field Training",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2011/4523b121/12OmNB9t6xs",
"parentPublication": {
"id": "proceedings/iciii/2011/4523/2",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670c085",
"title": "Behavioral Manifestations of Intercultural Competence in Computer-Mediated Intercultural Learning",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670c085/12OmNBh8gXp",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2007/1083/0/04417880",
"title": "Work in progress - Development of intercultural sensitivity from study abroad programs",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2007/04417880/12OmNrK9q3m",
"parentPublication": {
"id": "proceedings/fie/2007/1083/0",
"title": "2007 37th Annual Frontiers in Education Conference - Global Engineering: Knowledge Without Borders, Opportunities Without Passports",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dese/2010/4160/0/4160a119",
"title": "Implementation of an E-Learning Module in Virtual Centre for Entrepreneurship: The Development of Cultural Awareness in Students",
"doi": null,
"abstractUrl": "/proceedings-article/dese/2010/4160a119/12OmNwDj0Wb",
"parentPublication": {
"id": "proceedings/dese/2010/4160/0",
"title": "2010 Developments in E-systems Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipcc/2005/9027/0/01494202",
"title": "Paralogic hermeneutics: an alternative approach to teaching intercultural communication in a technical writing course",
"doi": null,
"abstractUrl": "/proceedings-article/ipcc/2005/01494202/12OmNzAohW1",
"parentPublication": {
"id": "proceedings/ipcc/2005/9027/0",
"title": "2005 IEEE International Professional Communication Conference (IPCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a087",
"title": "Intercultural Communication Strategies of Chinese Cultural Symbols via Online Videos",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a087/1CzeMMgVm5W",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2019/5094/0/509400a074",
"title": "An Empirical Study on the Relationship between Using Motivation of Internet Language and Intercultural Communication Competence of College Students",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2019/509400a074/1gjRGK2tjdm",
"parentPublication": {
"id": "proceedings/mlbdbi/2019/5094/0",
"title": "2019 International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a009",
"title": "A study on the cultivation model of intercultural communicative competence in foreign language teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a009/1tnYl4u5nWw",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKyE",
"title": "2007 37th Annual Frontiers in Education Conference - Global Engineering: Knowledge Without Borders, Opportunities Without Passports",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrK9q3m",
"doi": "10.1109/FIE.2007.4417880",
"title": "Work in progress - Development of intercultural sensitivity from study abroad programs",
"normalizedTitle": "Work in progress - Development of intercultural sensitivity from study abroad programs",
"abstract": "Study abroad programs offer enormous potential to develop graduates who have the ability to function effectively in intercultural contexts. Programs less than a semester in duration are becoming more common and may enable more engineering students to participate. This Work in Progress describes a qualitative, exploratory study of ways in which engineering students describe their cross-cultural experiences in a variety of shorter-term, experiential study abroad programs. The extent to which student characteristics and program design factors may affect development of intercultural sensitivity will also be investigated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Study abroad programs offer enormous potential to develop graduates who have the ability to function effectively in intercultural contexts. Programs less than a semester in duration are becoming more common and may enable more engineering students to participate. This Work in Progress describes a qualitative, exploratory study of ways in which engineering students describe their cross-cultural experiences in a variety of shorter-term, experiential study abroad programs. The extent to which student characteristics and program design factors may affect development of intercultural sensitivity will also be investigated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Study abroad programs offer enormous potential to develop graduates who have the ability to function effectively in intercultural contexts. Programs less than a semester in duration are becoming more common and may enable more engineering students to participate. This Work in Progress describes a qualitative, exploratory study of ways in which engineering students describe their cross-cultural experiences in a variety of shorter-term, experiential study abroad programs. The extent to which student characteristics and program design factors may affect development of intercultural sensitivity will also be investigated.",
"fno": "04417880",
"keywords": [
"Engineering Education",
"Intercultural Sensitivity",
"Study Abroad Program"
],
"authors": [
{
"affiliation": "Worcester Polytech. Inst., Worcester",
"fullName": "C. Demetry",
"givenName": "C.",
"surname": "Demetry",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-10-01T00:00:00",
"pubType": "proceedings",
"pages": "T2A-4-T2A-5",
"year": "2007",
"issn": null,
"isbn": "978-1-4244-1083-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04417879",
"articleId": "12OmNyRxFtH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04417881",
"articleId": "12OmNyr8Ybb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2007/1083/0/04417796",
"title": "Work in progress - fine-tuning an engineering study abroad program",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2007/04417796/12OmNB9bvcT",
"parentPublication": {
"id": "proceedings/fie/2007/1083/0",
"title": "2007 37th Annual Frontiers in Education Conference - Global Engineering: Knowledge Without Borders, Opportunities Without Passports",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2009/3557/3/3557f097",
"title": "An Empirical Research on Scenario Training Method of Intercultural Communication Capacity and Its Performance Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2009/3557f097/12OmNC943Mu",
"parentPublication": {
"id": "proceedings/etcs/2009/3557/3",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcgin/2012/4854/0/4854a343",
"title": "Differences between Sino- American Cultural Values from Intercultural Communication Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/bcgin/2012/4854a343/12OmNqAU6DA",
"parentPublication": {
"id": "proceedings/bcgin/2012/4854/0",
"title": "2012 Second International Conference on Business Computing and Global Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2004/8552/0/01408525",
"title": "The development of a summer study abroad program for engineering and computer science students at Baylor University Cynthia C. Fry",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2004/01408525/12OmNqH9hrs",
"parentPublication": {
"id": "proceedings/fie/2004/8552/0",
"title": "34th Annual Frontiers in Education, 2004. FIE 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2008/3496/1/3496a004",
"title": "Service-Oriented Collective Intelligence for Intercultural Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2008/3496a004/12OmNqzcvOu",
"parentPublication": {
"id": "proceedings/wi-iat/2008/3496/1",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2010/6261/0/05673154",
"title": "Sugarcane and corn: Biofuel-based study abroad programs in Brazil",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2010/05673154/12OmNwkR5BI",
"parentPublication": {
"id": "proceedings/fie/2010/6261/0",
"title": "2010 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659209",
"title": "Facilitating Intercultural Development: Preparing Future Engineers for Multidisciplinary Teams and Multicultural Environments",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659209/18j98GoGIog",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2021/2420/0/242000a249",
"title": "Can Online Study Abroad Programs During Covid-19 Promote Global Competencies?",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2021/242000a249/1Eb2MUSv46k",
"parentPublication": {
"id": "proceedings/iiai-aai/2021/2420/0",
"title": "2021 10th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873978",
"title": "Use Virtual Reality to Enhance Intercultural Sensitivity: A Randomised Parallel Longitudinal Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873978/1GjwOm9uWbe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274038",
"title": "Demonstrating the Impact of International Collaborative Disciplinary Experiences on Student Global, International, and Intercultural Competencies",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274038/1phRLUT3LQk",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "18j8XijMg2k",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "18j98GoGIog",
"doi": "10.1109/FIE.2018.8659209",
"title": "Facilitating Intercultural Development: Preparing Future Engineers for Multidisciplinary Teams and Multicultural Environments",
"normalizedTitle": "Facilitating Intercultural Development: Preparing Future Engineers for Multidisciplinary Teams and Multicultural Environments",
"abstract": "This innovative practice full paper describes an inclusive on-campus intercultural development program that can be applied in engineering learning environments. A major outcome of engineering programs across the nation is to graduate professional engineers who are able to work and communicate effectively in multicultural environments and multidisciplinary teams. Therefore, developing multicultural/intercultural competencies is an important aspect of engineering programs. Generally, study abroad programs are seen as an effective means to develop intercultural/multicultural competence. However, the opportunity to study abroad is often not accessible for all students, particularly those from traditionally underrepresented racial and ethnic backgrounds. This paper describes the design, implementation, and assessment of an on campus intercultural development program, and the outcomes based on quantitative and qualitative data collected from two separate cohorts of engineering students during 2016 and 2017. The program is grounded in a developmental intercultural paradigm - Developmental Model for Intercultural Sensitivity (DMIS) and Intercultural Development Continuum (IDC) - with an intentional focus on constructs identified on the Intercultural Knowledge & Competence VALUE Rubric. The outcomes and effectiveness are assessed using the Intercultural Development Inventory (IDI) and the Attitudes, Skills, and Knowledge Short Scale (ASKS<sup>2+</sup>). The overall results suggest that by intentionally focusing on the cognitive, affective, and behavioral dimensions of intercultural development, coupled with a developmental approach, it is possible to achieve intercultural.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This innovative practice full paper describes an inclusive on-campus intercultural development program that can be applied in engineering learning environments. A major outcome of engineering programs across the nation is to graduate professional engineers who are able to work and communicate effectively in multicultural environments and multidisciplinary teams. Therefore, developing multicultural/intercultural competencies is an important aspect of engineering programs. Generally, study abroad programs are seen as an effective means to develop intercultural/multicultural competence. However, the opportunity to study abroad is often not accessible for all students, particularly those from traditionally underrepresented racial and ethnic backgrounds. This paper describes the design, implementation, and assessment of an on campus intercultural development program, and the outcomes based on quantitative and qualitative data collected from two separate cohorts of engineering students during 2016 and 2017. The program is grounded in a developmental intercultural paradigm - Developmental Model for Intercultural Sensitivity (DMIS) and Intercultural Development Continuum (IDC) - with an intentional focus on constructs identified on the Intercultural Knowledge & Competence VALUE Rubric. The outcomes and effectiveness are assessed using the Intercultural Development Inventory (IDI) and the Attitudes, Skills, and Knowledge Short Scale (ASKS<sup>2+</sup>). The overall results suggest that by intentionally focusing on the cognitive, affective, and behavioral dimensions of intercultural development, coupled with a developmental approach, it is possible to achieve intercultural.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This innovative practice full paper describes an inclusive on-campus intercultural development program that can be applied in engineering learning environments. A major outcome of engineering programs across the nation is to graduate professional engineers who are able to work and communicate effectively in multicultural environments and multidisciplinary teams. Therefore, developing multicultural/intercultural competencies is an important aspect of engineering programs. Generally, study abroad programs are seen as an effective means to develop intercultural/multicultural competence. However, the opportunity to study abroad is often not accessible for all students, particularly those from traditionally underrepresented racial and ethnic backgrounds. This paper describes the design, implementation, and assessment of an on campus intercultural development program, and the outcomes based on quantitative and qualitative data collected from two separate cohorts of engineering students during 2016 and 2017. The program is grounded in a developmental intercultural paradigm - Developmental Model for Intercultural Sensitivity (DMIS) and Intercultural Development Continuum (IDC) - with an intentional focus on constructs identified on the Intercultural Knowledge & Competence VALUE Rubric. The outcomes and effectiveness are assessed using the Intercultural Development Inventory (IDI) and the Attitudes, Skills, and Knowledge Short Scale (ASKS2+). The overall results suggest that by intentionally focusing on the cognitive, affective, and behavioral dimensions of intercultural development, coupled with a developmental approach, it is possible to achieve intercultural.",
"fno": "08659209",
"keywords": [
"Educational Courses",
"Engineering Education",
"Further Education",
"Professional Aspects",
"Multidisciplinary Teams",
"Multicultural Environments",
"On Campus Intercultural Development Program",
"Engineering Learning Environments",
"Engineering Programs",
"Multicultural Intercultural Competencies",
"Intercultural Multicultural Competence",
"Ethnic Backgrounds",
"Engineering Students",
"Intercultural Sensitivity",
"Racial Backgrounds",
"Rubric",
"Developmental Intercultural Paradigm",
"Intercultural Development Inventory",
"Intercultural Development Continuum",
"IDI",
"IDC",
"Cultural Differences",
"Training",
"Global Communication",
"Reflection",
"Mentoring",
"Atmospheric Measurements",
"Particle Measurements",
"Intercultural Competence",
"On Campus Internationalization",
"Multidisciplinary Teams",
"Multicultural Environments"
],
"authors": [
{
"affiliation": "Purdue University, West Lafayette, Indiana, United States",
"fullName": "Darshini N. Render",
"givenName": "Darshini N.",
"surname": "Render",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University, West Lafayette, Indiana, United States",
"fullName": "Horane A. Holgate",
"givenName": "Horane A.",
"surname": "Holgate",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University, West Lafayette, Indiana, United States",
"fullName": "Charles A. Calahan",
"givenName": "Charles A.",
"surname": "Calahan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-9",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-1174-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08658988",
"articleId": "18j999FVvcQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08658426",
"articleId": "18j9toJ3wkM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2016/5670/0/5670c085",
"title": "Behavioral Manifestations of Intercultural Competence in Computer-Mediated Intercultural Learning",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670c085/12OmNBh8gXp",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2017/5920/0/08190448",
"title": "Improving communication in multicultural teams — A web-based model and its application in project management education",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190448/12OmNqJHFpq",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2011/1788/0/06137708",
"title": "The Significance of the Promotion of Education Equity by Multicultural Education",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2011/06137708/12OmNwK7o58",
"parentPublication": {
"id": "proceedings/kam/2011/1788/0",
"title": "2011 Fourth International Symposium on Knowledge Acquisition and Modeling",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2017/5920/0/08190697",
"title": "Preparing students for intensive global fieldwork",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190697/12OmNwl8GHg",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2017/5920/0/08190698",
"title": "Cultivating global mindsets without leaving campus: Building interculturally competent engineer",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190698/12OmNxwENkn",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/circus/2009/4097/0/05457325",
"title": "Collaboration and Intercultural Issues on Requirements:Communication, Understanding and Softskills (CIRCUS)",
"doi": null,
"abstractUrl": "/proceedings-article/circus/2009/05457325/12OmNylboxe",
"parentPublication": {
"id": "proceedings/circus/2009/4097/0",
"title": "Requirements: Communication, Understanding and Softskills, Collaboration and Intercultural Issues on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873978",
"title": "Use Virtual Reality to Enhance Intercultural Sensitivity: A Randomised Parallel Longitudinal Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873978/1GjwOm9uWbe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274093",
"title": "A KCI Approach to Promote Intercultural Competencies for International Virutal Engineering Student Teams (InVEST)",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274093/1phRKcJmIQo",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeim/2020/9623/0/962300a150",
"title": "A Study on the Practical Teaching of Intercultural Communication Competence Based on the International Development of Higher Vocational College",
"doi": null,
"abstractUrl": "/proceedings-article/icmeim/2020/962300a150/1syveFmiYdW",
"parentPublication": {
"id": "proceedings/icmeim/2020/9623/0",
"title": "2020 International Conference on Modern Education and Information Management (ICMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieit/2021/2563/0/256300a613",
"title": "Study of Intercultural Communication Training in Interpreting Teaching Based on Multimedia Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ieit/2021/256300a613/1wHKoq6Hrwc",
"parentPublication": {
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdEJiFbBC",
"doi": "10.1109/VRW55335.2022.00158",
"title": "The Immediate and Retained Effects of One-time Virtual Reality Exposure on Intercultural Sensitivity",
"normalizedTitle": "The Immediate and Retained Effects of One-time Virtual Reality Exposure on Intercultural Sensitivity",
"abstract": "This study aims to investigate the immediate and retained effects of one-time virtual reality (VR) exposure on intercultural sensitivity (IS) and identify the contributing factors. Three virtual scenarios about the ethnic minorities in Hong Kong were created for the empirical study. The longitudinal results involving 30 participants (15M 15F) showed that both the immediate and retained effects of the one-time VR exposure on IS were significant. Moreover, linear growth curve models suggested that among the female participants, presence and emotional empathy were closely associated with the change of IS over time, but this relation was not significant among the males.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study aims to investigate the immediate and retained effects of one-time virtual reality (VR) exposure on intercultural sensitivity (IS) and identify the contributing factors. Three virtual scenarios about the ethnic minorities in Hong Kong were created for the empirical study. The longitudinal results involving 30 participants (15M 15F) showed that both the immediate and retained effects of the one-time VR exposure on IS were significant. Moreover, linear growth curve models suggested that among the female participants, presence and emotional empathy were closely associated with the change of IS over time, but this relation was not significant among the males.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study aims to investigate the immediate and retained effects of one-time virtual reality (VR) exposure on intercultural sensitivity (IS) and identify the contributing factors. Three virtual scenarios about the ethnic minorities in Hong Kong were created for the empirical study. The longitudinal results involving 30 participants (15M 15F) showed that both the immediate and retained effects of the one-time VR exposure on IS were significant. Moreover, linear growth curve models suggested that among the female participants, presence and emotional empathy were closely associated with the change of IS over time, but this relation was not significant among the males.",
"fno": "840200a614",
"keywords": [
"Human Factors",
"Virtual Reality",
"One Time VR Exposure",
"One Time Virtual Reality Exposure",
"Intercultural Sensitivity",
"Immediate Retained Effects",
"Virtual Scenarios",
"Female Participants",
"Hong Kong",
"Emotional Empathy",
"Human Computer Interaction",
"Solid Modeling",
"Sensitivity",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Empirical Studies In HCI"
],
"authors": [
{
"affiliation": "Hong Kong Polytechnic University",
"fullName": "Richard Chen Li",
"givenName": "Richard Chen",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "City University of Hong Kong",
"fullName": "Angel Lo Lo Kon",
"givenName": "Angel Lo",
"surname": "Lo Kon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University College London",
"fullName": "Justin Juk Man So",
"givenName": "Justin Juk",
"surname": "Man So",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "City University of Hong Kong",
"fullName": "Horace Ho Shing Ip",
"givenName": "Horace Ho",
"surname": "Shing Ip",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "614-615",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJdEFqO8H6",
"name": "pvrw202284020-09757551s1-mm_840200a614.zip",
"size": "497 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757551s1-mm_840200a614.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a612",
"articleId": "1CJcLWf2s4U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a616",
"articleId": "1CJcDYLoOUU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrais/1995/7084/0/70840035",
"title": "Implications of balance disturbances following exposure to virtual reality systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1995/70840035/12OmNrkT7ul",
"parentPublication": {
"id": "proceedings/vrais/1995/7084/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a864",
"title": "Immersive Visualization of Sneeze Simulation Data on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a864/1CJeZEcFVmg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwecai/2022/7997/0/799700a339",
"title": "The Treatment and Development Prospect of VR Exposure Therapy for Mental Diseases",
"doi": null,
"abstractUrl": "/proceedings-article/iwecai/2022/799700a339/1Cugpye9cxW",
"parentPublication": {
"id": "proceedings/iwecai/2022/7997/0",
"title": "2022 3rd International Conference on Electronic Communication and Artificial Intelligence (IWECAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873978",
"title": "Use Virtual Reality to Enhance Intercultural Sensitivity: A Randomised Parallel Longitudinal Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873978/1GjwOm9uWbe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrhciai/2022/9182/0/918200a072",
"title": "Retinal Microvascular Segmentation Algorithm based on Multi-scale Attention Mechanism",
"doi": null,
"abstractUrl": "/proceedings-article/vrhciai/2022/918200a072/1LxfgzNUwcE",
"parentPublication": {
"id": "proceedings/vrhciai/2022/9182/0",
"title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089588",
"title": "Manipulating Puppets in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089588/1jIxbTl2uRi",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089531",
"title": "Toward Virtual Reality-based Evaluation of Robot Navigation among People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089531/1jIxd4kMUJW",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090611",
"title": "Pain Experience in Social VR: The Competing Effect on Objective Pain Tolerance and Subjective Pain Perception",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090611/1jIxokdBogo",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2021/2463/0/246300b137",
"title": "Creating a Virtual Reality OER Application to Teach Web Accessibility",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2021/246300b137/1wLcJBwF9mM",
"parentPublication": {
"id": "proceedings/compsac/2021/2463/0",
"title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a173",
"title": "Detecting Mental Workload in Virtual Reality Using EEG Spectral Data: A Deep Learning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a173/1zxLujKqBMs",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1phRpGBKvFS",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1phRTdiP7nG",
"doi": "10.1109/FIE44824.2020.9274181",
"title": "Cultural Intelligence and Experiences in International Engineering Programs",
"normalizedTitle": "Cultural Intelligence and Experiences in International Engineering Programs",
"abstract": "Given increasingly globalized economic trends and the global nature of many of the most pressing humanitarian problems the world faces, this full paper addresses engineers' needs to be prepared with the skills and competencies necessary to work with diverse clients and peers and on projects that span international borders. To foster such skills, engineering schools across the country are offering more international engineering programs at the undergraduate level. These programs, charged with the important task of preparing future engineers for globalized work, need to be appropriately evaluated to determine their effectiveness in promoting intercultural outcomes. Similarly, it is important to understand which aspects of these programs are most essential to helping foster intercultural outcomes in students. Therefore, in this paper, we investigate the following research questions: 1) Is there a significant change in students' cultural intelligence before and after they participate in international engineering programs, and 2) What relationships exist between students' cultural intelligence and their experiences in intercultural educational programs, when controlling for student characteristics and program variables? The answers to both questions are of great importance to the field of engineering, as they provide evidence to inform decisions around how to create, modify, and structure international programs in engineering. We found that participants' cultural intelligence as measured by the Cultural Intelligence Survey [13] significantly grew from pretest to posttest on the overall cultural intelligence (CQ) measure, as well as all four cultural intelligence dimensions. We also found that students' experiences abroad- specifically the frequency with which they built relationships and had meaningful conversations about culture with locals-was positively related to an increase in CQ score from pretest to posttest. 
Together, these findings suggest that not only are international programs in engineering successful in promoting intercultural outcomes, but also that students' frequency of interaction with local people is an important component of successful international programs. These findings have the potential to shape a range of international programs to better meet the needs of both future engineers and the clients, employers, and members of society that stand to benefit from their readiness to tackle global engineering challenges.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Given increasingly globalized economic trends and the global nature of many of the most pressing humanitarian problems the world faces, this full paper addresses engineers' needs to be prepared with the skills and competencies necessary to work with diverse clients and peers and on projects that span international borders. To foster such skills, engineering schools across the country are offering more international engineering programs at the undergraduate level. These programs, charged with the important task of preparing future engineers for globalized work, need to be appropriately evaluated to determine their effectiveness in promoting intercultural outcomes. Similarly, it is important to understand which aspects of these programs are most essential to helping foster intercultural outcomes in students. Therefore, in this paper, we investigate the following research questions: 1) Is there a significant change in students' cultural intelligence before and after they participate in international engineering programs, and 2) What relationships exist between students' cultural intelligence and their experiences in intercultural educational programs, when controlling for student characteristics and program variables? The answers to both questions are of great importance to the field of engineering, as they provide evidence to inform decisions around how to create, modify, and structure international programs in engineering. We found that participants' cultural intelligence as measured by the Cultural Intelligence Survey [13] significantly grew from pretest to posttest on the overall cultural intelligence (CQ) measure, as well as all four cultural intelligence dimensions. We also found that students' experiences abroad- specifically the frequency with which they built relationships and had meaningful conversations about culture with locals-was positively related to an increase in CQ score from pretest to posttest. 
Together, these findings suggest that not only are international programs in engineering successful in promoting intercultural outcomes, but also that students' frequency of interaction with local people is an important component of successful international programs. These findings have the potential to shape a range of international programs to better meet the needs of both future engineers and the clients, employers, and members of society that stand to benefit from their readiness to tackle global engineering challenges.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Given increasingly globalized economic trends and the global nature of many of the most pressing humanitarian problems the world faces, this full paper addresses engineers' needs to be prepared with the skills and competencies necessary to work with diverse clients and peers and on projects that span international borders. To foster such skills, engineering schools across the country are offering more international engineering programs at the undergraduate level. These programs, charged with the important task of preparing future engineers for globalized work, need to be appropriately evaluated to determine their effectiveness in promoting intercultural outcomes. Similarly, it is important to understand which aspects of these programs are most essential to helping foster intercultural outcomes in students. Therefore, in this paper, we investigate the following research questions: 1) Is there a significant change in students' cultural intelligence before and after they participate in international engineering programs, and 2) What relationships exist between students' cultural intelligence and their experiences in intercultural educational programs, when controlling for student characteristics and program variables? The answers to both questions are of great importance to the field of engineering, as they provide evidence to inform decisions around how to create, modify, and structure international programs in engineering. We found that participants' cultural intelligence as measured by the Cultural Intelligence Survey [13] significantly grew from pretest to posttest on the overall cultural intelligence (CQ) measure, as well as all four cultural intelligence dimensions. We also found that students' experiences abroad- specifically the frequency with which they built relationships and had meaningful conversations about culture with locals-was positively related to an increase in CQ score from pretest to posttest. 
Together, these findings suggest that not only are international programs in engineering successful in promoting intercultural outcomes, but also that students' frequency of interaction with local people is an important component of successful international programs. These findings have the potential to shape a range of international programs to better meet the needs of both future engineers and the clients, employers, and members of society that stand to benefit from their readiness to tackle global engineering challenges.",
"fno": "09274181",
"keywords": [
"Computer Aided Instruction",
"Computer Science Education",
"Cultural Aspects",
"Educational Courses",
"Educational Institutions",
"Engineering Education",
"Globalisation",
"Innovation Management",
"Project Management",
"CQ Score",
"Cultural Intelligence Survey",
"Global Engineering Challenges",
"International Programs",
"Cultural Intelligence Dimensions",
"Cultural Intelligence Measure",
"Structure International Programs",
"Program Variables",
"Intercultural Educational Programs",
"Engineering Schools",
"International Borders",
"Increasingly Globalized Economic Trends",
"International Engineering Programs",
"Cultural Differences",
"Instruments",
"Particle Measurements",
"Atmospheric Measurements",
"Sensitivity",
"Engineering Students",
"Task Analysis",
"International Programs",
"Cultural Intelligence",
"Study Abroad",
"Experiences"
],
"authors": [
{
"affiliation": "University of Michigan,Center for the Study of Higher and Postsecondary Education,Ann Arbor,MI",
"fullName": "Katie A. Shoemaker",
"givenName": "Katie A.",
"surname": "Shoemaker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Michigan,Department of Biomedical Engineering,Ann Arbor,MI",
"fullName": "Aileen Huang-Saad",
"givenName": "Aileen",
"surname": "Huang-Saad",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Michigan,International Programs in Engineering,Ann Arbor,MI",
"fullName": "Miranda Roberts",
"givenName": "Miranda",
"surname": "Roberts",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-9",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-8961-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09274155",
"articleId": "1phRuYcPO2k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09274166",
"articleId": "1phRPIwXIGY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2014/3922/0/07044297",
"title": "Comparison of the impact of two research experiences for undergraduate programs on preparing students for global workforces",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07044297/12OmNC4eSHi",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2017/3681/0/3681a031",
"title": "Cultural Factors Influencing International Collaborative Software Engineering Education in China",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2017/3681a031/12OmNCwUmx8",
"parentPublication": {
"id": "proceedings/apsec/2017/3681/0",
"title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcgin/2012/4854/0/4854a343",
"title": "Differences between Sino- American Cultural Values from Intercultural Communication Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/bcgin/2012/4854a343/12OmNqAU6DA",
"parentPublication": {
"id": "proceedings/bcgin/2012/4854/0",
"title": "2012 Second International Conference on Business Computing and Global Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2015/8454/0/07344355",
"title": "Examining how international experiences promote global competency among engineering graduate students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344355/12OmNyKa60o",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658692",
"title": "Lessons learned from International Service Learning Projects : Students’s Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658692/18j9uxwgSEo",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873978",
"title": "Use Virtual Reality to Enhance Intercultural Sensitivity: A Randomised Parallel Longitudinal Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873978/1GjwOm9uWbe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2022/6244/0/09962715",
"title": "Cross-cultural mentorships with Black and Brown US STEM Doctoral Students: Unpacking the Perceptions of International Faculty",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2022/09962715/1IHo9xXrlMk",
"parentPublication": {
"id": "proceedings/fie/2022/6244/0",
"title": "2022 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2019/1746/0/09028675",
"title": "Development and Contribution to Students’ Intercultural Skills: A Case Study of an International Collaborative Site",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2019/09028675/1iff8JV6LBe",
"parentPublication": {
"id": "proceedings/fie/2019/1746/0",
"title": "2019 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274093",
"title": "A KCI Approach to Promote Intercultural Competencies for International Virutal Engineering Student Teams (InVEST)",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274093/1phRKcJmIQo",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a602",
"title": "Relying on multi-modal contextual cross-cultural communication ability training big data analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a602/1wRIzxMnOX6",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1syvbTjoXdu",
"title": "2020 International Conference on Modern Education and Information Management (ICMEIM)",
"acronym": "icmeim",
"groupId": "1840284",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1syveFmiYdW",
"doi": "10.1109/ICMEIM51375.2020.00041",
"title": "A Study on the Practical Teaching of Intercultural Communication Competence Based on the International Development of Higher Vocational College",
"normalizedTitle": "A Study on the Practical Teaching of Intercultural Communication Competence Based on the International Development of Higher Vocational College",
"abstract": "In the context of international development of vocational education, the research intended to find out the appropriate ways to improve students' intercultural communication competence in college English class and tended to analyze the effects of the teaching reform. 128 students were asked to participate in the survey after the new teaching design and SPSS data-process confirmed that this new teaching design was effective in upgrading students' intercultural communication competence. Consequently, the new and effective way of teaching needs to be carried out in broader context to improve vocational colleges students' intercultural communication competence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the context of international development of vocational education, the research intended to find out the appropriate ways to improve students' intercultural communication competence in college English class and tended to analyze the effects of the teaching reform. 128 students were asked to participate in the survey after the new teaching design and SPSS data-process confirmed that this new teaching design was effective in upgrading students' intercultural communication competence. Consequently, the new and effective way of teaching needs to be carried out in broader context to improve vocational colleges students' intercultural communication competence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the context of international development of vocational education, the research intended to find out the appropriate ways to improve students' intercultural communication competence in college English class and tended to analyze the effects of the teaching reform. 128 students were asked to participate in the survey after the new teaching design and SPSS data-process confirmed that this new teaching design was effective in upgrading students' intercultural communication competence. Consequently, the new and effective way of teaching needs to be carried out in broader context to improve vocational colleges students' intercultural communication competence.",
"fno": "962300a150",
"keywords": [
"Computer Aided Instruction",
"Computer Science Education",
"Education",
"Educational Courses",
"Educational Institutions",
"Further Education",
"Teaching",
"Vocational Training",
"Practical Teaching",
"Intercultural Communication Competence",
"International Development",
"Higher Vocational College",
"Vocational Education",
"College English Class",
"Teaching Reform",
"Teaching Design",
"Vocational Colleges Students",
"Training",
"Terminology",
"Education",
"Public Speaking",
"Cross Cultural Communication",
"Information Management",
"Cultural Differences",
"Intercultural Communication Competence",
"Teaching Design",
"International Development"
],
"authors": [
{
"affiliation": "Chengdu Polytechnic Chengdu,International Education Department,People's Republic of China",
"fullName": "Xia Shan",
"givenName": "Xia",
"surname": "Shan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmeim",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "150-154",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9623-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "962300a146",
"articleId": "1syvrjqmCeQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "962300a155",
"articleId": "1syvkI55sqs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2011/4523/2/4523b121",
"title": "Intercultural Communication Competence Training through Network Platform and Field Training",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2011/4523b121/12OmNB9t6xs",
"parentPublication": {
"id": "proceedings/iciii/2011/4523/2",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670c085",
"title": "Behavioral Manifestations of Intercultural Competence in Computer-Mediated Intercultural Learning",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670c085/12OmNBh8gXp",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2011/4335/0/4335a704",
"title": "English and Vocational Integration Developing Communicative Competence",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2011/4335a704/12OmNwIpNqo",
"parentPublication": {
"id": "proceedings/cso/2011/4335/0",
"title": "2011 Fourth International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659209",
"title": "Facilitating Intercultural Development: Preparing Future Engineers for Multidisciplinary Teams and Multicultural Environments",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659209/18j98GoGIog",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900b592",
"title": "Promoting English Learners’ Intercultural Competence through Technology-enhanced Model",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b592/1C8GKQdaMA8",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a892",
"title": "A Study of Intercultural Competence under CLIL with the Assistance of E-learning—A Case Study of English Education of Chinese Culture",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a892/1KpBu3WZCJG",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2019/2632/0/263200a376",
"title": "Second Classroom Model For Cultivating Intercultural Communication Competence Of Practicality-Oriented Talents Considering State Transition Matrix",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2019/263200a376/1cI6mG3cEX6",
"parentPublication": {
"id": "proceedings/icris/2019/2632/0",
"title": "2019 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2019/5094/0/509400a074",
"title": "An Empirical Study on the Relationship between Using Motivation of Internet Language and Intercultural Communication Competence of College Students",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2019/509400a074/1gjRGK2tjdm",
"parentPublication": {
"id": "proceedings/mlbdbi/2019/5094/0",
"title": "2019 International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a009",
"title": "A study on the cultivation model of intercultural communicative competence in foreign language teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a009/1tnYl4u5nWw",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieit/2021/2563/0/256300a613",
"title": "Study of Intercultural Communication Training in Interpreting Teaching Based on Multimedia Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ieit/2021/256300a613/1wHKoq6Hrwc",
"parentPublication": {
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.