data dict |
|---|
{
"proceeding": {
"id": "1qyxi3OgORy",
"title": "2020 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qyxpuWM1bO",
"doi": "10.1109/3DV50981.2020.00048",
"title": "Refractive Multi-view Stereo",
"normalizedTitle": "Refractive Multi-view Stereo",
"abstract": "In this article we show how to extend the multi-view stereo technique when the object to be reconstructed is inside a transparent - but refractive - material, which causes distortions in the images. We provide a theoretical formulation of the problem accounting for a general, non-planar shape of the refractive interface, then a discrete solving method, which are validated by tests on synthetic and real data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this article we show how to extend the multi-view stereo technique when the object to be reconstructed is inside a transparent - but refractive - material, which causes distortions in the images. We provide a theoretical formulation of the problem accounting for a general, non-planar shape of the refractive interface, then a discrete solving method, which are validated by tests on synthetic and real data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this article we show how to extend the multi-view stereo technique when the object to be reconstructed is inside a transparent - but refractive - material, which causes distortions in the images. We provide a theoretical formulation of the problem accounting for a general, non-planar shape of the refractive interface, then a discrete solving method, which are validated by tests on synthetic and real data.",
"fno": "812800a384",
"keywords": [
"Image Reconstruction",
"Refractive Index",
"Stereo Image Processing",
"Refractive Multiview Stereo",
"Multiview Stereo Technique",
"Theoretical Formulation",
"Nonplanar Shape",
"Refractive Interface",
"Problem Accounting",
"Discrete Solving Method",
"Cameras",
"Shape",
"Optical Refraction",
"Optical Imaging",
"Geometry",
"Refractive Index",
"Image Reconstruction",
"3 Dreconstruction",
"Multi View Stereo",
"Refraction"
],
"authors": [
{
"affiliation": "INP-ENSEEIHT,Toulouse,France",
"fullName": "Matthew Cassidy",
"givenName": "Matthew",
"surname": "Cassidy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRIT, UMR CNRS 5505,Toulouse,France",
"fullName": "Jean Mélou",
"givenName": "Jean",
"surname": "Mélou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "GREYC, UMR CNRS 6072,Caen,France",
"fullName": "Yvain Quéau",
"givenName": "Yvain",
"surname": "Quéau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DIKU,Copenhagen,Denmark",
"fullName": "François Lauze",
"givenName": "François",
"surname": "Lauze",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRIT, UMR CNRS 5505,Toulouse,France",
"fullName": "Jean-Denis Durou",
"givenName": "Jean-Denis",
"surname": "Durou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "384-393",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-8128-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "812800a374",
"articleId": "1qyxn2r4ocE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "812800a394",
"articleId": "1qyxkbFekXC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ca/2014/8205/0/07026258",
"title": "The Dispersion Coefficient of Air Refractive Index Measurement System Based on CCD Imaging Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2014/07026258/12OmNAWpymQ",
"parentPublication": {
"id": "proceedings/ca/2014/8205/0",
"title": "2014 7th Conference on Control and Automation (CA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/037P1A37",
"title": "Refractive height fields from single and multiple images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/037P1A37/12OmNBEpnuC",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a915",
"title": "BRDF Estimation of Structural Color Object by Using Hyper Spectral Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a915/12OmNBSSVme",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032f325",
"title": "Refractive Structure-from-Motion Through a Flat Refractive Interface",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f325/12OmNvsm6vz",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567291",
"title": "Localized structures in a passive cavity with refractive index modulation",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567291/12OmNwkzusV",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/423O3C03",
"title": "A theory of multi-layer flat refractive geometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/423O3C03/12OmNxxdZzR",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977410",
"title": "Camera Calibration for Plate Refractive Imaging System",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977410/12OmNzC5T4s",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2011/4362/0/4362a146",
"title": "Refractive Epipolar Geometry for Underwater Stereo Matching",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2011/4362a146/12OmNzlly4y",
"parentPublication": {
"id": "proceedings/crv/2011/4362/0",
"title": "2011 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/02/08812919",
"title": "Reconstruction of Geometric and Optical Parameters of Non-Planar Objects with Thin Film",
"doi": null,
"abstractUrl": "/journal/tp/2021/02/08812919/1cPWFDzHL4k",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2021/3596/0/359600a110",
"title": "A novel optical refractive index sensor based on VCSELs and gold nanoparticle arrays",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2021/359600a110/1tMPOU0iVQk",
"parentPublication": {
"id": "proceedings/icedme/2021/3596/0",
"title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAXxX2a",
"doi": "10.1109/VR.2017.7892333",
"title": "Towards understanding scene transition techniques in immersive 360 movies and cinematic experiences",
"normalizedTitle": "Towards understanding scene transition techniques in immersive 360 movies and cinematic experiences",
"abstract": "Many researchers have studied methods of effective travel in virtual environments, but little work has considered scene transitions, which may be important for virtual reality experiences like immersive 360 degree movies. In this research, we designed and evaluated three different scene transition techniques in two environments, conducted a pilot study, and collected metrics related to sickness, spatial orientation, and preference. Our preliminary results indicate that faster techniques are generally preferred by gamers and more gradual transitions are preferred by participants with less experience with 3D gaming and virtual reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many researchers have studied methods of effective travel in virtual environments, but little work has considered scene transitions, which may be important for virtual reality experiences like immersive 360 degree movies. In this research, we designed and evaluated three different scene transition techniques in two environments, conducted a pilot study, and collected metrics related to sickness, spatial orientation, and preference. Our preliminary results indicate that faster techniques are generally preferred by gamers and more gradual transitions are preferred by participants with less experience with 3D gaming and virtual reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many researchers have studied methods of effective travel in virtual environments, but little work has considered scene transitions, which may be important for virtual reality experiences like immersive 360 degree movies. In this research, we designed and evaluated three different scene transition techniques in two environments, conducted a pilot study, and collected metrics related to sickness, spatial orientation, and preference. Our preliminary results indicate that faster techniques are generally preferred by gamers and more gradual transitions are preferred by participants with less experience with 3D gaming and virtual reality.",
"fno": "07892333",
"keywords": [
"Interpolation",
"Teleportation",
"Motion Pictures",
"Three Dimensional Displays",
"Virtual Environments",
"Google",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Texas A&M University, United States",
"fullName": "Kasra Rahimi Moghadam",
"givenName": "Kasra Rahimi",
"surname": "Moghadam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Texas A&M University, United States",
"fullName": "Eric D. Ragan",
"givenName": "Eric D.",
"surname": "Ragan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "375-376",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892332",
"articleId": "12OmNAq3hBL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892334",
"articleId": "12OmNxFaLDm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2002/1492/0/14920093",
"title": "A Combined Immersive and Desktop Authoring Tool for Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920093/12OmNAi6vVS",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836475",
"title": "Enhancing Immersive Cinematic Experience with Augmented Virtuality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836475/12OmNCm7BFH",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2017/5812/0/08056613",
"title": "An immersive virtual environment for collaborative geovisualization",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056613/12OmNvCi45l",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446620",
"title": "Spatial Updating and Simulator Sickness During Steering and Jumping in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446620/13bd1fKQxs4",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/06/08554159",
"title": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/08554159/17D45WB0qbp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049698",
"title": "Gaining the High Ground: Teleportation to Mid-Air Targets in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049698/1KYotugT0xW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797777",
"title": "Exploration of Large Omnidirectional Images in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797777/1cJ0JISlXDG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797994",
"title": "Redirecting View Rotation in Immersive Movies with Washout Filters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797994/1cJ19tjOG2s",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864599",
"title": "Development of a Directed Teleport Function for Immersive Training in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864599/1e5Zs0bUtxu",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a183",
"title": "Enabling Collaborative Interaction with 360° Panoramas between Large-scale Displays and Immersive Headsets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a183/1yeQBWUxple",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13l5NWGUV7b",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering (ICSE)",
"acronym": "icse",
"groupId": "1000691",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13l5NXlN9Qu",
"doi": "10.1145/3180155.3180188",
"title": "Launch-Mode-Aware Context-Sensitive Activity Transition Analysis",
"normalizedTitle": "Launch-Mode-Aware Context-Sensitive Activity Transition Analysis",
"abstract": "Existing static analyses model activity transitions in Android apps context-insensitively, making it impossible to distinguish different activity launch modes, reducing the pointer analysis precision for an activity's callbacks, and potentially resulting in infeasible activity transition paths. In this paper, we introduce Chime, a launch-mode-aware context-sensitive activity transition analysis that models different instances of an activity class according to its launch mode and the transitions between activities context-sensitively, by working together with an object-sensitive pointer analysis. Our evaluation shows that our context-sensitive activity transition analysis is more precise than its context-insensitive counterpart in capturing activity transitions, facilitating GUI testing, and improving the pointer analysis precision.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing static analyses model activity transitions in Android apps context-insensitively, making it impossible to distinguish different activity launch modes, reducing the pointer analysis precision for an activity's callbacks, and potentially resulting in infeasible activity transition paths. In this paper, we introduce Chime, a launch-mode-aware context-sensitive activity transition analysis that models different instances of an activity class according to its launch mode and the transitions between activities context-sensitively, by working together with an object-sensitive pointer analysis. Our evaluation shows that our context-sensitive activity transition analysis is more precise than its context-insensitive counterpart in capturing activity transitions, facilitating GUI testing, and improving the pointer analysis precision.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing static analyses model activity transitions in Android apps context-insensitively, making it impossible to distinguish different activity launch modes, reducing the pointer analysis precision for an activity's callbacks, and potentially resulting in infeasible activity transition paths. In this paper, we introduce Chime, a launch-mode-aware context-sensitive activity transition analysis that models different instances of an activity class according to its launch mode and the transitions between activities context-sensitively, by working together with an object-sensitive pointer analysis. Our evaluation shows that our context-sensitive activity transition analysis is more precise than its context-insensitive counterpart in capturing activity transitions, facilitating GUI testing, and improving the pointer analysis precision.",
"fno": "563801a598",
"keywords": [
"Mobile Computing",
"Program Diagnostics",
"Infeasible Activity Transition Paths",
"Launch Mode Aware Context Sensitive Activity Transition Analysis",
"Activity Class",
"Launch Mode",
"Activities Context Sensitively",
"Object Sensitive Pointer Analysis",
"Pointer Analysis Precision",
"Static Analyses Model Activity Transitions",
"Android Apps Context Insensitively",
"Androids",
"Humanoid Robots",
"Context Modeling",
"Standards",
"Analytical Models",
"Graphical User Interfaces",
"Navigation",
"Android",
"Pointer Analysis",
"Activity Transition Analysis"
],
"authors": [
{
"affiliation": null,
"fullName": "Yifei Zhang",
"givenName": "Yifei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yulei Sui",
"givenName": "Yulei",
"surname": "Sui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jingling Xue",
"givenName": "Jingling",
"surname": "Xue",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-05-01T00:00:00",
"pubType": "proceedings",
"pages": "598-608",
"year": "2018",
"issn": "1558-1225",
"isbn": "978-1-4503-5638-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "563801a586",
"articleId": "13l5NXDXub9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "563801a609",
"articleId": "13l5NXBGH84",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itc/2008/2402/0/04700648",
"title": "Launch-on-Shift-Capture Transition Tests",
"doi": null,
"abstractUrl": "/proceedings-article/itc/2008/04700648/12OmNBp52Gp",
"parentPublication": {
"id": "proceedings/itc/2008/2402/0",
"title": "2008 IEEE International Test Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2015/8425/0/07133990",
"title": "Advancing Android activity recognition service with Markov smoother",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2015/07133990/12OmNqEAT7s",
"parentPublication": {
"id": "proceedings/percomw/2015/8425/0",
"title": "2015 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2015/0025/0/0025a658",
"title": "Static Window Transition Graphs for Android (T)",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2015/0025a658/12OmNvJXeAo",
"parentPublication": {
"id": "proceedings/ase/2015/0025/0",
"title": "2015 30th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2017/2684/0/08115638",
"title": "All about activity injection: Threats, semantics, and detection",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2017/08115638/12OmNy68ECP",
"parentPublication": {
"id": "proceedings/ase/2017/2684/0",
"title": "2017 32nd IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsme/2017/0992/0/0992a103",
"title": "AimDroid: Activity-Insulated Multi-level Automated Testing for Android Applications",
"doi": null,
"abstractUrl": "/proceedings-article/icsme/2017/0992a103/12OmNzd7bSf",
"parentPublication": {
"id": "proceedings/icsme/2017/0992/0",
"title": "2017 IEEE International Conference on Software Maintenance and Evolution (ICSME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2017/05/mso2017050022",
"title": "Adaptive Virtual Gestures for GUI Testing on Smartphones",
"doi": null,
"abstractUrl": "/magazine/so/2017/05/mso2017050022/13rRUxOdD6q",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pst/2017/2487/0/248701a253",
"title": "Real-Time Detection and Reaction to Activity Hijacking Attacks in Android Smartphones (Short Paper)",
"doi": null,
"abstractUrl": "/proceedings-article/pst/2017/248701a253/144U9blGNCT",
"parentPublication": {
"id": "proceedings/pst/2017/2487/0",
"title": "2017 15th Annual Conference on Privacy, Security and Trust (PST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcom/2018/8021/0/802100a102",
"title": "Automatically Detecting Malicious Sensitive Data Usage in Android Applications",
"doi": null,
"abstractUrl": "/proceedings-article/bigcom/2018/802100a102/14jQfP8mWAO",
"parentPublication": {
"id": "proceedings/bigcom/2018/8021/0",
"title": "2018 4th International Conference on Big Data Computing and Communications (BIGCOM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2015/6949/0/6949a931",
"title": "What the App is That? Deception and Countermeasures in the Android User Interface",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2015/6949a931/17D45WHONqN",
"parentPublication": {
"id": "proceedings/sp/2015/6949/0",
"title": "2015 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2020/7121/0/712100a457",
"title": "Multiple-Entry Testing of Android Applications by Constructing Activity Launching Contexts",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2020/712100a457/1pK5gvtSpk4",
"parentPublication": {
"id": "proceedings/icse/2020/7121/0",
"title": "2020 IEEE/ACM 42nd International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1S5KRwg8",
"doi": "10.1109/ISMAR-Adjunct.2018.00072",
"title": "Toward More Believable VR by Smooth Transition Between Real and Virtual Environments via Omnidirectional Video",
"normalizedTitle": "Toward More Believable VR by Smooth Transition Between Real and Virtual Environments via Omnidirectional Video",
"abstract": "In conventional virtual reality systems, users usually do not perceive and recognize the experience as reality. For example, users of a virtual disaster simulator know, consciously or unconsciously, that the presented disaster is not real, which inherently limits the training effect. To make the virtual experience more believable, we propose a novel real-virtual transiton technique that preserves the sense of “conviction about reality” in a virtual environment. This is realized by spatio-temporal smooth transition from the real environment to the virtual environment with omnidirectional video captured in advance at the user's position. Our technique requires less preparation cost and presents a more believable experience compared to existing transition techniques using a handmade 3D replica of the real environment. In this article, reported are the concept of our technique, a prototype system and a preliminary user study that has shown the effectiveness of the technique.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In conventional virtual reality systems, users usually do not perceive and recognize the experience as reality. For example, users of a virtual disaster simulator know, consciously or unconsciously, that the presented disaster is not real, which inherently limits the training effect. To make the virtual experience more believable, we propose a novel real-virtual transiton technique that preserves the sense of “conviction about reality” in a virtual environment. This is realized by spatio-temporal smooth transition from the real environment to the virtual environment with omnidirectional video captured in advance at the user's position. Our technique requires less preparation cost and presents a more believable experience compared to existing transition techniques using a handmade 3D replica of the real environment. In this article, reported are the concept of our technique, a prototype system and a preliminary user study that has shown the effectiveness of the technique.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In conventional virtual reality systems, users usually do not perceive and recognize the experience as reality. For example, users of a virtual disaster simulator know, consciously or unconsciously, that the presented disaster is not real, which inherently limits the training effect. To make the virtual experience more believable, we propose a novel real-virtual transiton technique that preserves the sense of “conviction about reality” in a virtual environment. This is realized by spatio-temporal smooth transition from the real environment to the virtual environment with omnidirectional video captured in advance at the user's position. Our technique requires less preparation cost and presents a more believable experience compared to existing transition techniques using a handmade 3D replica of the real environment. In this article, reported are the concept of our technique, a prototype system and a preliminary user study that has shown the effectiveness of the technique.",
"fno": "08699257",
"keywords": [
"Video Signal Processing",
"Virtual Reality",
"Omnidirectional Video",
"Virtual Experience",
"Real Virtual Transiton Technique",
"Virtual Environment",
"Spatio Temporal Smooth Transition",
"Virtual Reality Systems",
"VR",
"Virtual Environments",
"Three Dimensional Displays",
"Resists",
"Prototypes",
"Visual Effects",
"Solid Modeling",
"Cameras",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Virtual Reality",
"Computing Methodologies X 2014 Artificial Intelligence X 2014 Computer Vision X 2014 Image And Video Acquisition"
],
"authors": [
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Shingo Okeda",
"givenName": "Shingo",
"surname": "Okeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Hikari Takehara",
"givenName": "Hikari",
"surname": "Takehara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SenseTime, Japan",
"fullName": "Norihiko Kawai",
"givenName": "Norihiko",
"surname": "Kawai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Nobuchika Sakata",
"givenName": "Nobuchika",
"surname": "Sakata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Data Science, Shiga University, Japan",
"fullName": "Tomokazu Sato",
"givenName": "Tomokazu",
"surname": "Sato",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Data Science, Shiga University, Japan",
"fullName": "Takuma Tanaka",
"givenName": "Takuma",
"surname": "Tanaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Kiyoshi Kiyokawa",
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "222-225",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699229",
"articleId": "19F1LS1YWuA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699270",
"articleId": "19F1SDkm35e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wi-iat/2009/3801/1/3801a631",
"title": "Believable Electronic Trading Environments on the Web",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2009/3801a631/12OmNAle6Q9",
"parentPublication": {
"id": "proceedings/wi-iat/2009/3801/1",
"title": "2009 IEEE/WIC/ACM International Joint Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a121",
"title": "[POSTER] Believable Virtual Characters for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a121/12OmNwJybU7",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836491",
"title": "Using Visual Effects to Facilitate Depth Perception for Spatial Tasks in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836491/12OmNwdtw9P",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446346",
"title": "Reducing VR Sickness Through Peripheral Visual Effects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446346/13bd1fHrlRY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446157",
"title": "VR Music",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446157/13bd1gQYgEq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a232",
"title": "VR technology applied to traditional dance",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a232/1CzeG2lZvEI",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a119",
"title": "Re-enacting Football Matches in VR using Virtual Agents’ Realistic Behaviours",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a119/1KmFbcahv2M",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798100",
"title": "Towards a Framework on Accessible and Social VR in Education",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798100/1cJ16Rutlm0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864520",
"title": "Actors in VR storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864520/1e5ZrTyTjc4",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090559",
"title": "A Methodology of Eye Gazing Attention Determination for VR Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090559/1jIxoACmybu",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgp7L6LcY",
"doi": "10.1109/VR55154.2023.00083",
"title": "Designing Viewpoint Transition Techniques in Multiscale Virtual Environments",
"normalizedTitle": "Designing Viewpoint Transition Techniques in Multiscale Virtual Environments",
"abstract": "Viewpoint transitions have been shown to improve users' spatial orientation and help them build a cognitive map when they are navigating an unfamiliar virtual environment. Previous work has investigated transitions in single-scale virtual environments, focusing on trajectories and continuity. We extend this work with an in-depth investigation of transition techniques in multiscale virtual environments (MVEs). We identify challenges in navigating MVEs with nested structures and assess how different transition techniques affect spatial understanding and usability. Through two user studies, we investigated transition trajectories, interactive control of transition movement, and speed modulation in a nested MVE. We show that some types of viewpoint transitions enhance users' spatial awareness and confidence in their spatial orientation and reduce the need to revisit a target point of interest multiple times.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Viewpoint transitions have been shown to improve users' spatial orientation and help them build a cognitive map when they are navigating an unfamiliar virtual environment. Previous work has investigated transitions in single-scale virtual environments, focusing on trajectories and continuity. We extend this work with an in-depth investigation of transition techniques in multiscale virtual environments (MVEs). We identify challenges in navigating MVEs with nested structures and assess how different transition techniques affect spatial understanding and usability. Through two user studies, we investigated transition trajectories, interactive control of transition movement, and speed modulation in a nested MVE. We show that some types of viewpoint transitions enhance users' spatial awareness and confidence in their spatial orientation and reduce the need to revisit a target point of interest multiple times.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Viewpoint transitions have been shown to improve users' spatial orientation and help them build a cognitive map when they are navigating an unfamiliar virtual environment. Previous work has investigated transitions in single-scale virtual environments, focusing on trajectories and continuity. We extend this work with an in-depth investigation of transition techniques in multiscale virtual environments (MVEs). We identify challenges in navigating MVEs with nested structures and assess how different transition techniques affect spatial understanding and usability. Through two user studies, we investigated transition trajectories, interactive control of transition movement, and speed modulation in a nested MVE. We show that some types of viewpoint transitions enhance users' spatial awareness and confidence in their spatial orientation and reduce the need to revisit a target point of interest multiple times.",
"fno": "481500a680",
"keywords": [
"Three Dimensional Displays",
"Navigation",
"Virtual Environments",
"Modulation",
"Focusing",
"User Interfaces",
"Trajectory",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Techniques"
],
"authors": [
{
"affiliation": "Simon Fraser University",
"fullName": "Jong-In Lee",
"givenName": "Jong-In",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research, retired",
"fullName": "Paul Asente",
"givenName": "Paul",
"surname": "Asente",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University",
"fullName": "Wolfgang Stuerzlinger",
"givenName": "Wolfgang",
"surname": "Stuerzlinger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "680-690",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgoTo5TUs",
"name": "pvr202348150-010108464s1-mm_481500a680.zip",
"size": "138 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108464s1-mm_481500a680.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500a669",
"articleId": "1MNgtBAIjG8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a691",
"articleId": "1MNgl22Q3XG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892276",
"title": "A comparison of methods for navigation and wayfinding in large virtual environments using walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892276/12OmNAQJzMG",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892333",
"title": "Towards understanding scene transition techniques in immersive 360 movies and cinematic experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892333/12OmNAXxX2a",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240175",
"title": "Design and Evaluation of Navigation Techniques for Multiscale Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240175/12OmNyv7mkJ",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567311",
"title": "Ising-Bloch transition for spatially extended patterns",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567311/12OmNz61dnX",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2018/5638/0/563801a598",
"title": "Launch-Mode-Aware Context-Sensitive Activity Transition Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2018/563801a598/13l5NXlN9Qu",
"parentPublication": {
"id": "proceedings/icse/2018/5638/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260946",
"title": "The Effect of Transition Type in Multi-View 360° Media",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260946/13rRUxly8T4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v1005",
"title": "Scalable WIM: Effective Exploration in Large-scale Astrophysical Environments",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v1005/13rRUygBw70",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797777",
"title": "Exploration of Large Omnidirectional Images in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797777/1cJ0JISlXDG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/10/09376675",
"title": "Multiscale Unfolding: Illustratively Visualizing the Whole Genome at a Glance",
"doi": null,
"abstractUrl": "/journal/tg/2022/10/09376675/1rSN1VOCUHC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0X6CH9wk",
"doi": "10.1109/VR.2019.8797968",
"title": "Did You See What I Saw?: Comparing User Synchrony When Watching 360° Video In HMD Vs Flat Screen",
"normalizedTitle": "Did You See What I Saw?: Comparing User Synchrony When Watching 360° Video In HMD Vs Flat Screen",
"abstract": "This study examined whether the high level of immersion provided by HMDs encourages participants to synchronise their attention during viewing. 39 participants watched the 360° documentary “Clouds Over Sidra” using either a HMD or via a flat screen tablet display. We found that the HMD group showed significantly greater overall ISC did the tablet group and that this effect was strongest during transition between scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study examined whether the high level of immersion provided by HMDs encourages participants to synchronise their attention during viewing. 39 participants watched the 360° documentary “Clouds Over Sidra” using either a HMD or via a flat screen tablet display. We found that the HMD group showed significantly greater overall ISC did the tablet group and that this effect was strongest during transition between scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study examined whether the high level of immersion provided by HMDs encourages participants to synchronise their attention during viewing. 39 participants watched the 360° documentary “Clouds Over Sidra” using either a HMD or via a flat screen tablet display. We found that the HMD group showed significantly greater overall ISC did the tablet group and that this effect was strongest during transition between scenes.",
"fno": "08797968",
"keywords": [
"Helmet Mounted Displays",
"Touch Sensitive Screens",
"User Synchrony",
"Sidra",
"Flat Screen Tablet Display",
"HMD Group",
"ISC",
"Clouds Over Sidra Documentary",
"Resists",
"Virtual Reality",
"Time Series Analysis",
"Cloud Computing",
"Visualization",
"Correlation",
"Media",
"360 X 00 B 0 Video",
"Synchrony",
"Inter Subject Correlation Analysis",
"Topic Area 1 Technologies X 0026 Applications",
"Topic Area 3 Interaction"
],
"authors": [
{
"affiliation": "University of Bath, Bath, UK",
"fullName": "Harry Farmer",
"givenName": "Harry",
"surname": "Farmer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of the West of England, Bristol, UK",
"fullName": "Chris Bevan",
"givenName": "Chris",
"surname": "Bevan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Bristol, Bristol, UK",
"fullName": "David P. Green",
"givenName": "David P.",
"surname": "Green",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Bath, Bath, UK",
"fullName": "Mandy Rose",
"givenName": "Mandy",
"surname": "Rose",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Bath, Bath, UK",
"fullName": "Kirsten Cater",
"givenName": "Kirsten",
"surname": "Cater",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Bath, Bath, UK",
"fullName": "Danaë Stanton-Fraser",
"givenName": "Danaë",
"surname": "Stanton-Fraser",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "916-917",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798180",
"articleId": "1cJ1bDktgoU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797967",
"articleId": "1cJ134ZOI9y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a094",
"title": "Automated Spatial Calibration of HMD Systems with Unconstrained Eye-cameras",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a094/12OmNxVDuUP",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446551",
"title": "A Demonstration of ShareVR: Co-Located Experiences for Virtual Reality Between HMD and Non-HMD Users",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446551/13bd1gzWkQD",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a130",
"title": "The Impacts of Subtitles on 360-Degree Video Journalism Watching",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a130/17D45VsBU1i",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699249",
"title": "Effect of Using HMDs for One Hour on Preteens Visual Fatigue",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699249/19F1RlY3coU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a812",
"title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794561",
"title": "AR HMD Guidance for Controlled Hand-Held 3D Acquisition",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794561/1dNHoWNm3GE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a231",
"title": "Perceptions of Integrating Augmented Reality into Network Cabling Tutors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a231/1pBMhdT7iN2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a371",
"title": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a371/1qpzCZXhpS0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a448",
"title": "CAVE vs. HMD in Distance Perception",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a448/1tnXudPdN4s",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysx79ZXcA",
"doi": "10.1109/ISMAR50242.2020.00040",
"title": "Transitioning360: Content-aware NFoV Virtual Camera Paths for 360° Video Playback",
"normalizedTitle": "Transitioning360: Content-aware NFoV Virtual Camera Paths for 360° Video Playback",
"abstract": "Despite the increasing number of head-mounted displays, many 360° VR videos are still being viewed by users on existing 2D displays. To this end, a subset of the 360° video content is often shown inside a manually or semi-automatically selected normal-field-of-view (NFoV) window. However, during the playback, simply watching an NFoV video can easily miss concurrent off-screen content. We present Transitioning360, a tool for 360° video navigation and playback on 2D displays by transitioning between multiple NFoV views that track potentially interesting targets or events. Our method computes virtual NFoV camera paths considering content awareness and diversity in an offline preprocess. During playback, the user can watch any NFoV view corresponding to a precomputed camera path. Moreover, our interface shows other candidate views, providing a sense of concurrent events. At any time, the user can transition to other candidate views for fast navigation and exploration. Experimental results including a user study demonstrate that the viewing experience using our method is more enjoyable and convenient than previous methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite the increasing number of head-mounted displays, many 360° VR videos are still being viewed by users on existing 2D displays. To this end, a subset of the 360° video content is often shown inside a manually or semi-automatically selected normal-field-of-view (NFoV) window. However, during the playback, simply watching an NFoV video can easily miss concurrent off-screen content. We present Transitioning360, a tool for 360° video navigation and playback on 2D displays by transitioning between multiple NFoV views that track potentially interesting targets or events. Our method computes virtual NFoV camera paths considering content awareness and diversity in an offline preprocess. During playback, the user can watch any NFoV view corresponding to a precomputed camera path. Moreover, our interface shows other candidate views, providing a sense of concurrent events. At any time, the user can transition to other candidate views for fast navigation and exploration. Experimental results including a user study demonstrate that the viewing experience using our method is more enjoyable and convenient than previous methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite the increasing number of head-mounted displays, many 360° VR videos are still being viewed by users on existing 2D displays. To this end, a subset of the 360° video content is often shown inside a manually or semi-automatically selected normal-field-of-view (NFoV) window. However, during the playback, simply watching an NFoV video can easily miss concurrent off-screen content. We present Transitioning360, a tool for 360° video navigation and playback on 2D displays by transitioning between multiple NFoV views that track potentially interesting targets or events. Our method computes virtual NFoV camera paths considering content awareness and diversity in an offline preprocess. During playback, the user can watch any NFoV view corresponding to a precomputed camera path. Moreover, our interface shows other candidate views, providing a sense of concurrent events. At any time, the user can transition to other candidate views for fast navigation and exploration. Experimental results including a user study demonstrate that the viewing experience using our method is more enjoyable and convenient than previous methods.",
"fno": "850800a185",
"keywords": [
"Cameras",
"Computer Graphics",
"Helmet Mounted Displays",
"User Interfaces",
"Video Signal Processing",
"Virtual Reality",
"Transitioning 360",
"Content Aware N Fo V Virtual Camera Paths",
"Video Playback",
"Head Mounted Displays",
"360 X 00 B 0 VR Videos",
"360 X 00 B 0 Video Content",
"Normal Field Of View Window",
"N Fo V Video",
"Off Screen Content",
"Video Navigation",
"Multiple N Fo V Views",
"Track Potentially Interesting Targets",
"Content Awareness",
"N Fo V View",
"Precomputed Camera Path",
"Candidate Views",
"Concurrent Events",
"Viewing Experience",
"Virtual N Fo V Camera Paths",
"Visualization",
"Target Tracking",
"Navigation",
"Two Dimensional Displays",
"Tools",
"Cameras",
"Videos",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Computing Methodologies",
"Computer Graphics",
"Image Manipulation",
"Image Processing"
],
"authors": [
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems",
"fullName": "Miao Wang",
"givenName": "Miao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems",
"fullName": "Yi-Jun Li",
"givenName": "Yi-Jun",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems",
"fullName": "Wen-Xuan Zhang",
"givenName": "Wen-Xuan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Bath,UK",
"fullName": "Christian Richardt",
"givenName": "Christian",
"surname": "Richardt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University,BNRist,Beijing",
"fullName": "Shi-Min Hu",
"givenName": "Shi-Min",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "185-194",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a174",
"articleId": "1pysyl9FDhu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a195",
"articleId": "1pystVP5LFK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892229",
"title": "6-DOF VR videos with a single 360-camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892229/12OmNAlvHtF",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551577",
"title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360° Video Network Multicast",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08661657",
"title": "Motion parallax for 360° RGBD video",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08661657/18bmQqdj3Nu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c011",
"title": "Pano-AVQA: Grounded Audio-Visual Question Answering on 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c011/1BmLjJCm02Q",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09779957",
"title": "Casual 6-DoF: free-viewpoint panorama using a handheld 360° camera",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09779957/1DBTD2uB4di",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600d752",
"title": "360MonoDepth: High-Resolution 360° Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600d752/1H1mgCrsMtG",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049696",
"title": "Wavelet-Based Fast Decoding of 360° Videos",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049696/1KYoz753Sxi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a258",
"title": "360° Surface Regression with a Hyper-Sphere Loss",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a258/1ezRDMEgU3C",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090456",
"title": "On the Effect of Standing and Seated Viewing of 360° Videos on Subjective Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090456/1jIxyayiDp6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093262",
"title": "360-Indoor: Towards Learning Real-World Objects in 360° Indoor Equirectangular Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093262/1jPbAWPyE8g",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tGcgESIWpa",
"title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"acronym": "iiai-aai",
"groupId": "1801921",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1tGcwoZgSsg",
"doi": "10.1109/IIAI-AAI50415.2020.00091",
"title": "Kansei Transition Analysis by Time-series Change of Media Content",
"normalizedTitle": "Kansei Transition Analysis by Time-series Change of Media Content",
"abstract": "In this paper, we present a new concept, a waveform model of Kansei transition for time-series media content. It is important to apply the time-series change of media content to Kansei information processing. For example, the impression of music media content changes over time. In our model, we represent Kansei transition by time-series change of media content as waveforms. We realize new Kansei similarity by comparison with Kansei transitions represented by waveforms applying a signal processing technique. Through new Kansei similarity, it is possible to realize media content retrieval and recommendation systems corresponding to the time-series Kansei transition of media content. Our model consists of two modules: a high-order media-Kansei transformation module and a waveform similarity computation module. The high-order media-Kansei transformation module extracts each Kansei magnitude by each time from the features of media content. The waveform similarity computation module computes similarities between each waveform represented as Kansei transition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a new concept, a waveform model of Kansei transition for time-series media content. It is important to apply the time-series change of media content to Kansei information processing. For example, the impression of music media content changes over time. In our model, we represent Kansei transition by time-series change of media content as waveforms. We realize new Kansei similarity by comparison with Kansei transitions represented by waveforms applying a signal processing technique. Through new Kansei similarity, it is possible to realize media content retrieval and recommendation systems corresponding to the time-series Kansei transition of media content. Our model consists of two modules: a high-order media-Kansei transformation module and a waveform similarity computation module. The high-order media-Kansei transformation module extracts each Kansei magnitude by each time from the features of media content. The waveform similarity computation module computes similarities between each waveform represented as Kansei transition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a new concept, a waveform model of Kansei transition for time-series media content. It is important to apply the time-series change of media content to Kansei information processing. For example, the impression of music media content changes over time. In our model, we represent Kansei transition by time-series change of media content as waveforms. We realize new Kansei similarity by comparison with Kansei transitions represented by waveforms applying a signal processing technique. Through new Kansei similarity, it is possible to realize media content retrieval and recommendation systems corresponding to the time-series Kansei transition of media content. Our model consists of two modules: a high-order media-Kansei transformation module and a waveform similarity computation module. The high-order media-Kansei transformation module extracts each Kansei magnitude by each time from the features of media content. The waveform similarity computation module computes similarities between each waveform represented as Kansei transition.",
"fno": "739700a418",
"keywords": [
"Content Based Retrieval",
"Feature Extraction",
"Image Retrieval",
"Music",
"Recommender Systems",
"Signal Processing",
"Time Series",
"Kansei Magnitude",
"Media Content Retrieval",
"Signal Processing Technique",
"Time Series Change",
"Waveform Similarity Computation Module",
"High Order Media Kansei Transformation Module",
"Kansei Transition Analysis",
"Time Series Kansei Transition",
"Recommendation Systems",
"Kansei Similarity",
"Music Media Content Changes",
"Kansei Information Processing",
"Time Series Media Content",
"Analytical Models",
"Computational Modeling",
"Media",
"Signal Processing",
"Feature Extraction",
"Informatics",
"Kansei Transition",
"Media Content",
"Media Kansei Transformation",
"Similarity",
"Time Series Variation Detection"
],
"authors": [
{
"affiliation": "Musashino University,Department of Data Science,Tokyo,Japan",
"fullName": "Takafumi Nakanishi",
"givenName": "Takafumi",
"surname": "Nakanishi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Musashino University,Department of Data Science,Tokyo,Japan",
"fullName": "Ryotaro Okada",
"givenName": "Ryotaro",
"surname": "Okada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Musashino University,Department of Data Science,Tokyo,Japan",
"fullName": "Rintaro Nakahodo",
"givenName": "Rintaro",
"surname": "Nakahodo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iiai-aai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "418-423",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7397-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "739700a414",
"articleId": "1tGci8T1SlG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "739700a424",
"articleId": "1tGcktO6zFS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icoin/2001/0951/0/09510285",
"title": "A Design of A Kansei Retrieval System for Distributed Multi-media Databases",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2001/09510285/12OmNCcKQe0",
"parentPublication": {
"id": "proceedings/icoin/2001/0951/0",
"title": "Proceedings 15th International Conference on Information Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169827",
"title": "MICO - Media in Context",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169827/12OmNviHKhl",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a138",
"title": "Kansei Engineering with Online Content Mining for Cross-Border Logistics Service Design",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a138/12OmNxw5BaX",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2017/0621/0/0621a029",
"title": "Kansei Engineering with Online Review Mining for Hotel Service Development",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2017/0621a029/12OmNy3iFvt",
"parentPublication": {
"id": "proceedings/iiai-aai/2017/0621/0",
"title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2015/7367/0/7367d405",
"title": "What Drives Consumers to Click on Social Media Ads? The Roles of Content, Media, and Individual Factors",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2015/7367d405/12OmNzTH0Ti",
"parentPublication": {
"id": "proceedings/hicss/2015/7367/0",
"title": "2015 48th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icds/2009/3526/0/3526a208",
"title": "Mobile Media for Heterogeneous Interaction Landscapes - Towards Integrated Liquid Media Ojects",
"doi": null,
"abstractUrl": "/proceedings-article/icds/2009/3526a208/12OmNzmLxC7",
"parentPublication": {
"id": "proceedings/icds/2009/3526/0",
"title": "International Conference on the Digital Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260946",
"title": "The Effect of Transition Type in Multi-View 360° Media",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260946/13rRUxly8T4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2012/03/mmu2012030002",
"title": "Rich Media, Poor Media",
"doi": null,
"abstractUrl": "/magazine/mu/2012/03/mmu2012030002/13rRUyXKxV3",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2021/2420/0/242000a382",
"title": "Semantic Waveform Model for Similarity Measure by Time-series Variation in Meaning",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2021/242000a382/1Eb2MAQ4NLW",
"parentPublication": {
"id": "proceedings/iiai-aai/2021/2420/0",
"title": "2021 10th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2019/2627/0/262700a795",
"title": "A Class Content Summary Method Based on Media-driven Real-time Content Management Framework",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2019/262700a795/1hrLAmVBDC8",
"parentPublication": {
"id": "proceedings/iiai-aai/2019/2627/0",
"title": "2019 8th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0NcRFX5m",
"doi": "10.1109/VR.2019.8797974",
"title": "Remapped Physical-Virtual Interfaces with Bimanual Haptic Retargeting",
"normalizedTitle": "Remapped Physical-Virtual Interfaces with Bimanual Haptic Retargeting",
"abstract": "This paper proposes a novel interface for virtual reality in which physical interface components are mapped to multiple virtual counterparts using haptic retargeting illusions. This gives virtual reality interfaces the ability to have correct haptic sensations for many virtual buttons although in the physical space there is only one. This is a generic system that can be applied to areas including design, interaction tasks, product prototype development and interactive games in virtual reality. The system presented extends existing retargeting algorithms to support asymmetric bimanual interactions. A new warp technique, called interface warp, was developed to support remapped virtual reality user interfaces. Through an experimental user study, we explore the effects of bimanual retargeting and the interface warp technique on task response time, errors, presence, perceived manipulation compared to unimanual (single handed) retargeting and other existing warp techniques. The results demonstrated faster task response time and less errors for the interface warp technique and shows no significant effect of bimanual interactions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a novel interface for virtual reality in which physical interface components are mapped to multiple virtual counterparts using haptic retargeting illusions. This gives virtual reality interfaces the ability to have correct haptic sensations for many virtual buttons although in the physical space there is only one. This is a generic system that can be applied to areas including design, interaction tasks, product prototype development and interactive games in virtual reality. The system presented extends existing retargeting algorithms to support asymmetric bimanual interactions. A new warp technique, called interface warp, was developed to support remapped virtual reality user interfaces. Through an experimental user study, we explore the effects of bimanual retargeting and the interface warp technique on task response time, errors, presence, perceived manipulation compared to unimanual (single handed) retargeting and other existing warp techniques. The results demonstrated faster task response time and less errors for the interface warp technique and shows no significant effect of bimanual interactions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a novel interface for virtual reality in which physical interface components are mapped to multiple virtual counterparts using haptic retargeting illusions. This gives virtual reality interfaces the ability to have correct haptic sensations for many virtual buttons although in the physical space there is only one. This is a generic system that can be applied to areas including design, interaction tasks, product prototype development and interactive games in virtual reality. The system presented extends existing retargeting algorithms to support asymmetric bimanual interactions. A new warp technique, called interface warp, was developed to support remapped virtual reality user interfaces. Through an experimental user study, we explore the effects of bimanual retargeting and the interface warp technique on task response time, errors, presence, perceived manipulation compared to unimanual (single handed) retargeting and other existing warp techniques. The results demonstrated faster task response time and less errors for the interface warp technique and shows no significant effect of bimanual interactions.",
"fno": "08797974",
"keywords": [
"Haptic Interfaces",
"Virtual Reality",
"Physical Virtual Interfaces",
"Bimanual Haptic Retargeting",
"Physical Interface Components",
"Haptic Retargeting Illusions",
"Virtual Buttons",
"Asymmetric Bimanual Interactions",
"Virtual Reality User Interfaces",
"Interface Warp Technique",
"Virtual Counterparts",
"Haptic Sensations",
"Haptic Interfaces",
"Virtual Reality",
"Task Analysis",
"User Interfaces",
"Mathematical Model",
"Visualization",
"Shape",
"H 5 2 Information Interfaces And Presentation User Interfaces X 2014 Haptic I O",
"H 1 2 Models And Principles User Machine Systems X 2014 Human Factors"
],
"authors": [
{
"affiliation": "University of South Australia",
"fullName": "Brandon J. Matthews",
"givenName": "Brandon J.",
"surname": "Matthews",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Bruce H. Thomas",
"givenName": "Bruce H.",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Stewart Von Itzstein",
"givenName": "Stewart",
"surname": "Von Itzstein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Ross T. Smith",
"givenName": "Ross T.",
"surname": "Smith",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "19-27",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798281",
"articleId": "1cJ0NlqtQk0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797744",
"articleId": "1cJ1dKraais",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/percom/2008/3113/0/3113a125",
"title": "Efficient Retargeting of Generated Device User-Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2008/3113a125/12OmNrY3LrY",
"parentPublication": {
"id": "proceedings/percom/2008/3113/0",
"title": "2008 Sixth Annual IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ivs/2005/8961/0/01505201",
"title": "VR haptic interfaces for teleoperation: an evaluation study",
"doi": null,
"abstractUrl": "/proceedings-article/ivs/2005/01505201/12OmNx5piQE",
"parentPublication": {
"id": "proceedings/ivs/2005/8961/0",
"title": "2005 IEEE Intelligent Vehicles Symposium Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446549",
"title": "Encounter-Type Haptic Interfaces for Virtual Reality Musical Instruments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446549/13bd1f3HvEz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2013/03/tth2013030285",
"title": "Bimanual Integration of Position and Curvature in Haptic Perception",
"doi": null,
"abstractUrl": "/journal/th/2013/03/tth2013030285/13rRUwdIOUW",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/03/06783759",
"title": "A Survey on Bimanual Haptic Interaction",
"doi": null,
"abstractUrl": "/journal/th/2014/03/06783759/13rRUxYIMVe",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a618",
"title": "Retargeting Destinations of Passive Props for Enhancing Haptic Feedback in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a618/1CJeVmWfgWc",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a612",
"title": "Investigating The Effect of Direction on The Limits of Haptic Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a612/1JrReInK5H2",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382898",
"title": "Combining Dynamic Passive Haptics and Haptic Retargeting for Enhanced Haptic Feedback in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382898/1saZv7Dd9Ty",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a150",
"title": "Unscripted Retargeting: Reach Prediction for Haptic Retargeting in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a150/1tuAPeNHqog",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09576632",
"title": "Adaptive Reset Techniques for Haptic Retargeted Interaction",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09576632/1xIKunVGow0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxhXj1Srm",
"doi": "10.1109/VRW50115.2020.00261",
"title": "DRoom: a gamified demonstration of Real Haptics technology",
"normalizedTitle": "DRoom: a gamified demonstration of Real Haptics technology",
"abstract": "We present DRoom, a gamified demonstration of our Real Haptics technology, a novel interaction method for virtual environments. Participants in this demonstration will experience the benefits of Real Haptics while playing an escape room game in which they will have to solve several puzzles using objects in the room within a predefined time frame. And they will have fun.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present DRoom, a gamified demonstration of our Real Haptics technology, a novel interaction method for virtual environments. Participants in this demonstration will experience the benefits of Real Haptics while playing an escape room game in which they will have to solve several puzzles using objects in the room within a predefined time frame. And they will have fun.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present DRoom, a gamified demonstration of our Real Haptics technology, a novel interaction method for virtual environments. Participants in this demonstration will experience the benefits of Real Haptics while playing an escape room game in which they will have to solve several puzzles using objects in the room within a predefined time frame. And they will have fun.",
"fno": "09090441",
"keywords": [
"Games",
"Haptic Interfaces",
"Virtual Environments",
"Resists",
"Conferences",
"Cameras"
],
"authors": [
{
"affiliation": "Nokia Bell Labs",
"fullName": "Alvaro Villegas",
"givenName": "Alvaro",
"surname": "Villegas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Bell Labs",
"fullName": "Pablo Perez",
"givenName": "Pablo",
"surname": "Perez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Bell Labs",
"fullName": "Redouane Kachach",
"givenName": "Redouane",
"surname": "Kachach",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Bell Labs",
"fullName": "Francisco Pereira",
"givenName": "Francisco",
"surname": "Pereira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Bell Labs",
"fullName": "Ester Gonzalez-Sosa",
"givenName": "Ester",
"surname": "Gonzalez-Sosa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "820-821",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090679",
"articleId": "1jIxw0869kQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090543",
"articleId": "1jIxl2iNCOQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgi/2000/0643/0/06430295",
"title": "Haptics Issues in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2000/06430295/12OmNyQYt7r",
"parentPublication": {
"id": "proceedings/cgi/2000/0643/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446128",
"title": "Rendering of Pressure and Textures Using Wearable Haptics in Immersive VR Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446128/13bd1eSlyt0",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07452616",
"title": "Haptics for Product Design and Manufacturing Simulation",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07452616/13rRUNvyats",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2009/03/tth2009030141",
"title": "Can Haptics Facilitate Interaction with an In-Vehicle Multifunctional Interface?",
"doi": null,
"abstractUrl": "/journal/th/2009/03/tth2009030141/13rRUwbJD4S",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2012/05/mic2012050083",
"title": "Integrating Haptics in Web Interfaces: State of the Art and Open Issues",
"doi": null,
"abstractUrl": "/magazine/ic/2012/05/mic2012050083/13rRUxASudL",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2010/03/tth2010030153",
"title": "Guest Editorial for World Haptics Spotlight Section",
"doi": null,
"abstractUrl": "/journal/th/2010/03/tth2010030153/13rRUy0HYRB",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a748",
"title": "Wormholes in VR: Teleporting Hands for Flexible Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a748/1JrR93EDicE",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090574",
"title": "Real Haptics: Using Physical Manipulation to Control Virtual Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090574/1jIxihJ0Qz6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a134",
"title": "Proxy Haptics for Surgical Training",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a134/1oZBAEAmMBW",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a333",
"title": "Study on Pseudo-haptics during Swimming Motion in a Virtual Reality Space",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a333/1qpzCt4VUOI",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXL8cnVjW",
"doi": "10.1109/VRW52623.2021.00116",
"title": "The Effects of a Stressful Physical Environment During Virtual Reality Height Exposure",
"normalizedTitle": "The Effects of a Stressful Physical Environment During Virtual Reality Height Exposure",
"abstract": "Virtual reality height exposure is a reliable method of inducing stress with low variance across age and demographics. As the virtual environment's quality of rendering fidelity increases dramatically, it leads to the neglect or simplification of the physical environment. This paper presents an experiment that explored the effects of an elevated physical platform with a virtually heightened environment to induce stress. Fifteen participants experienced four different conditions of varying physical and virtual heights. Participants reported significantly higher stress level when physically elevated regardless of the virtual height, which suggests that the inherent elevation will induce more stress within participants.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality height exposure is a reliable method of inducing stress with low variance across age and demographics. As the virtual environment's quality of rendering fidelity increases dramatically, it leads to the neglect or simplification of the physical environment. This paper presents an experiment that explored the effects of an elevated physical platform with a virtually heightened environment to induce stress. Fifteen participants experienced four different conditions of varying physical and virtual heights. Participants reported significantly higher stress level when physically elevated regardless of the virtual height, which suggests that the inherent elevation will induce more stress within participants.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality height exposure is a reliable method of inducing stress with low variance across age and demographics. As the virtual environment's quality of rendering fidelity increases dramatically, it leads to the neglect or simplification of the physical environment. This paper presents an experiment that explored the effects of an elevated physical platform with a virtually heightened environment to induce stress. Fifteen participants experienced four different conditions of varying physical and virtual heights. Participants reported significantly higher stress level when physically elevated regardless of the virtual height, which suggests that the inherent elevation will induce more stress within participants.",
"fno": "405700a468",
"keywords": [
"Rendering Computer Graphics",
"Virtual Reality",
"Stressful Physical Environment",
"Virtual Reality Height Exposure",
"Virtual Environment",
"Elevated Physical Platform",
"Virtually Heightened Environment",
"Physical Heights",
"Virtual Heights",
"Higher Stress Level",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"User Interfaces",
"Rendering Computer Graphics",
"Haptic Interfaces",
"Reliability",
"Human Centered Computing",
"Virtual Reality",
"Height Exposure",
"Passive Haptic Feedback"
],
"authors": [
{
"affiliation": "School of Computer Science, University of Technology,Sydney",
"fullName": "Howe Yuan Zhu",
"givenName": "Howe Yuan",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science, University of Adelaide",
"fullName": "Hsiang-Ting Chen",
"givenName": "Hsiang-Ting",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science, University of Technology,Sydney",
"fullName": "Chin-Teng Lin",
"givenName": "Chin-Teng",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "468-469",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a466",
"articleId": "1tnXAGndw0U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a470",
"articleId": "1tnXQnECWOc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344696",
"title": "Gestural and Postural Reactions to Stressful Event: Design of a Haptic Stressful Stimulus",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344696/12OmNqBtiOD",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446528",
"title": "Design of a Virtual Reality and Haptic Setup Linking Arousals to Training Scenarios: A Preliminary Stage",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446528/13bd1eNNYn7",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040662",
"title": "Leveraging Virtual Humans to Effectively Prepare Learners for Stressful Interpersonal Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040662/13rRUzpzeB4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a140",
"title": "Downsizing: The Effect of Mixed-Reality Person Representations on Stress and Presence in Telecommunication",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a140/17D45VsBTU8",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a102",
"title": "Studying the Role of Self and External Touch in the Appropriation of Dysmorphic Hands",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a102/1JrRiazvJ1m",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797974",
"title": "Remapped Physical-Virtual Interfaces with Bimanual Haptic Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797974/1cJ0NcRFX5m",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090408",
"title": "Effects of Physical Prop Shape on Virtual Stairs Travel Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090408/1jIxps3cZgs",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090656",
"title": "Applying Stress Management Techniques in Augmented Reality: Stress Induction and Reduction in Healthcare Providers During Virtual Triage Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090656/1jIxzMPf8g8",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a606",
"title": "Empirically Evaluating the Effects of Perceptual Information Channels on the Size Perception of Tangibles in Near-Field Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a606/1tuAGHy2cQ8",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09647972",
"title": "The Effects of Virtual and Physical Elevation on Physiological Stress During Virtual Reality Height Exposure",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09647972/1ziK9MgkooM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBqdr6N",
"title": "2015 IEEE International Conference on Data Science and Data Intensive Systems (DSDIS)",
"acronym": "dsdis",
"groupId": "1811644",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqJq4la",
"doi": "10.1109/DSDIS.2015.102",
"title": "Step Detection from Power Generation Pattern in Energy-Harvesting Wearable Devices",
"normalizedTitle": "Step Detection from Power Generation Pattern in Energy-Harvesting Wearable Devices",
"abstract": "Energy-harvesting wearable devices generate power by converting natural phenomena such as human motion into usable electricity. We conduct an experimental study to validate the feasibility of detecting steps from the power generation patterns of a wearable piezoelectric energy harvester (PEH). Four healthy adults took part in the study, which includes walking along straight and turning walkways as well as descending and ascending stairs. We find that power generation exhibits distinctive peaks for each step, making it possible to accurately detect steps using widely used peak detection algorithms. Using our PEH prototype, we successfully detected 550 steps out of 570, achieving a step detection accuracy of 96%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Energy-harvesting wearable devices generate power by converting natural phenomena such as human motion into usable electricity. We conduct an experimental study to validate the feasibility of detecting steps from the power generation patterns of a wearable piezoelectric energy harvester (PEH). Four healthy adults took part in the study, which includes walking along straight and turning walkways as well as descending and ascending stairs. We find that power generation exhibits distinctive peaks for each step, making it possible to accurately detect steps using widely used peak detection algorithms. Using our PEH prototype, we successfully detected 550 steps out of 570, achieving a step detection accuracy of 96%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Energy-harvesting wearable devices generate power by converting natural phenomena such as human motion into usable electricity. We conduct an experimental study to validate the feasibility of detecting steps from the power generation patterns of a wearable piezoelectric energy harvester (PEH). Four healthy adults took part in the study, which includes walking along straight and turning walkways as well as descending and ascending stairs. We find that power generation exhibits distinctive peaks for each step, making it possible to accurately detect steps using widely used peak detection algorithms. Using our PEH prototype, we successfully detected 550 steps out of 570, achieving a step detection accuracy of 96%.",
"fno": "0214a604",
"keywords": [
"Accelerometers",
"Legged Locomotion",
"Prototypes",
"Acceleration",
"Power Generation",
"Biomedical Monitoring",
"Turning"
],
"authors": [
{
"affiliation": null,
"fullName": "Sara Khalifa",
"givenName": "Sara",
"surname": "Khalifa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mahbub Hassan",
"givenName": "Mahbub",
"surname": "Hassan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Aruna Seneviratne",
"givenName": "Aruna",
"surname": "Seneviratne",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dsdis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "604-610",
"year": "2015",
"issn": null,
"isbn": "978-1-5090-0214-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0214a594",
"articleId": "12OmNzy7uRm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0214a611",
"articleId": "12OmNAlNiRz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2016/3811/0/07738020",
"title": "Generalized activity recognition using accelerometer in wearable devices for IoT applications",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2016/07738020/12OmNviHKmo",
"parentPublication": {
"id": "proceedings/avss/2016/3811/0",
"title": "2016 13th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2016/1941/0/07457058",
"title": "Secure key generation and distribution protocol for wearable devices",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2016/07457058/12OmNwE9Oqt",
"parentPublication": {
"id": "proceedings/percomw/2016/1941/0",
"title": "2016 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/soca/2017/1326/0/1326a245",
"title": "Analysis of Health and Physiological Index Based on Sleep and Walking Steps by Wearable Devices for the Elderly",
"doi": null,
"abstractUrl": "/proceedings-article/soca/2017/1326a245/12OmNx3ZjjQ",
"parentPublication": {
"id": "proceedings/soca/2017/1326/0",
"title": "2017 IEEE 10th Conference on Service-Oriented Computing and Applications (SOCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2015/8425/0/07134076",
"title": "A novel estimation method of road condition for pedestrian navigation",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2015/07134076/12OmNxETalS",
"parentPublication": {
"id": "proceedings/percomw/2015/8425/0",
"title": "2015 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2017/3932/0/3932a324",
"title": "Step detection algorithm for accurate distance estimation using dynamic step length",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2017/3932a324/12OmNylKAXz",
"parentPublication": {
"id": "proceedings/mdm/2017/3932/0",
"title": "2017 18th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2018/06/08063906",
"title": "HARKE: Human Activity Recognition from Kinetic Energy Harvesting Data in Wearable Devices",
"doi": null,
"abstractUrl": "/journal/tm/2018/06/08063906/13rRUwInvzc",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/1/08005782",
"title": "Gaitsense: A Potential Assistance for Physical Rehabilitation by Means of Wearable Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005782/17D45W9KVJt",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/1",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-iucc-bdcloud-socialcom-sustaincom/2018/1141/0/114100a878",
"title": "An Energy Efficient Smartphone Pedometer Based on an Auto-Correlation Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-iucc-bdcloud-socialcom-sustaincom/2018/114100a878/18AuKxKSkO4",
"parentPublication": {
"id": "proceedings/ispa-iucc-bdcloud-socialcom-sustaincom/2018/1141/0",
"title": "2018 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Ubiquitous Computing & Communications, Big Data & Cloud Computing, Social Computing & Networking, Sustainable Computing & Communications (ISPA/IUCC/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2017/4338/0/07917600",
"title": "Measuring changes in gait and vehicle transfer ability during inpatient rehabilitation with wearable inertial sensors",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2017/07917600/19wAFWJqdEs",
"parentPublication": {
"id": "proceedings/percom-workshops/2017/4338/0",
"title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/2/260702a477",
"title": "Activity Segmentation Using Wearable Sensors for DVT/PE Risk Detection",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260702a477/1cYiu6yPyG4",
"parentPublication": {
"id": "proceedings/compsac/2019/2607/2",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwp74rn",
"title": "2016 IEEE 13th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"acronym": "mass",
"groupId": "1001499",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwoPtvY",
"doi": "10.1109/MASS.2016.034",
"title": "Track Your Foot Step: Anchor-Free Indoor Localization Based on Sensing Users' Foot Steps",
"normalizedTitle": "Track Your Foot Step: Anchor-Free Indoor Localization Based on Sensing Users' Foot Steps",
"abstract": "Currently, conventional indoor localization schemes mainly leverage WiFi-based or Bluetooth-based schemes to locate the users in the indoor environment. These schemes require to deploy the infrastructures such as the WiFi APs and Bluetooth beacons in advance to assist indoor localization. This property hinders the indoor localization schemes in that they are not scalable to any other situations without these infrastructures. In this paper, we propose FootStep-Tracker, an anchor-free indoor localization scheme purely based on sensing the user's footsteps. By embedding the tiny SensorTag into the user's shoes, FootStep-Tracker is able to accurately perceive the user's moving trace, including the moving direction and distance, by leveraging the accelerometers and gyroscopes. Furthermore, by detecting the user's activities such as ascending/descending the stairs and taking an elevator, FootStep-Tracker can effectively correlate with the specified positions such as stairs and elevators, and further determine the exacted moving traces in the indoor map by leveraging the space constraints in the map. Realistic experiment results show that, FootStep-Tracker is able to achieve an average localization accuracy of 1m for indoor localization, without any infrastructures having been deployed in advance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Currently, conventional indoor localization schemes mainly leverage WiFi-based or Bluetooth-based schemes to locate the users in the indoor environment. These schemes require to deploy the infrastructures such as the WiFi APs and Bluetooth beacons in advance to assist indoor localization. This property hinders the indoor localization schemes in that they are not scalable to any other situations without these infrastructures. In this paper, we propose FootStep-Tracker, an anchor-free indoor localization scheme purely based on sensing the user's footsteps. By embedding the tiny SensorTag into the user's shoes, FootStep-Tracker is able to accurately perceive the user's moving trace, including the moving direction and distance, by leveraging the accelerometers and gyroscopes. Furthermore, by detecting the user's activities such as ascending/descending the stairs and taking an elevator, FootStep-Tracker can effectively correlate with the specified positions such as stairs and elevators, and further determine the exacted moving traces in the indoor map by leveraging the space constraints in the map. Realistic experiment results show that, FootStep-Tracker is able to achieve an average localization accuracy of 1m for indoor localization, without any infrastructures having been deployed in advance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Currently, conventional indoor localization schemes mainly leverage WiFi-based or Bluetooth-based schemes to locate the users in the indoor environment. These schemes require to deploy the infrastructures such as the WiFi APs and Bluetooth beacons in advance to assist indoor localization. This property hinders the indoor localization schemes in that they are not scalable to any other situations without these infrastructures. In this paper, we propose FootStep-Tracker, an anchor-free indoor localization scheme purely based on sensing the user's footsteps. By embedding the tiny SensorTag into the user's shoes, FootStep-Tracker is able to accurately perceive the user's moving trace, including the moving direction and distance, by leveraging the accelerometers and gyroscopes. Furthermore, by detecting the user's activities such as ascending/descending the stairs and taking an elevator, FootStep-Tracker can effectively correlate with the specified positions such as stairs and elevators, and further determine the exacted moving traces in the indoor map by leveraging the space constraints in the map. Realistic experiment results show that, FootStep-Tracker is able to achieve an average localization accuracy of 1m for indoor localization, without any infrastructures having been deployed in advance.",
"fno": "2833a201",
"keywords": [
"Elevators",
"Sensors",
"Accelerometers",
"Gyroscopes",
"Legged Locomotion",
"IEEE 802 11 Standard",
"Smart Phones",
"Sensing Foot Steps",
"Anchor Free",
"Indoor Localization"
],
"authors": [
{
"affiliation": null,
"fullName": "Chang Liu",
"givenName": "Chang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lei Xie",
"givenName": "Lei",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chuyu Wang",
"givenName": "Chuyu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jie Wu",
"givenName": "Jie",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sanglu Lu",
"givenName": "Sanglu",
"surname": "Lu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mass",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-10-01T00:00:00",
"pubType": "proceedings",
"pages": "201-209",
"year": "2016",
"issn": "2155-6814",
"isbn": "978-1-5090-2833-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2833a192",
"articleId": "12OmNxFaLpV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2833a210",
"articleId": "12OmNBpVPS0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/uic-atc-scalcom/2014/7646/0/7646a349",
"title": "Subtractive Clustering as ZUPT Detector",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom/2014/7646a349/12OmNxXl5Fp",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom/2014/7646/0",
"title": "2014 IEEE 11th Intl Conf on Ubiquitous Intelligence & Computing and 2014 IEEE 11th Intl Conf on Autonomic & Trusted Computing and 2014 IEEE 14th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209d505",
"title": "Pose Invariant Activity Classification for Multi-floor Indoor Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209d505/12OmNy2rRTy",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icws/2016/2675/0/2675a180",
"title": "Indoor Localization Service Based on the Data Fusion of Wi-Fi and RFID",
"doi": null,
"abstractUrl": "/proceedings-article/icws/2016/2675a180/12OmNyL0TLg",
"parentPublication": {
"id": "proceedings/icws/2016/2675/0",
"title": "2016 IEEE International Conference on Web Services (ICWS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2015/7632/0/07301734",
"title": "A stigmergic approach to indoor localization using Bluetooth Low Energy beacons",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2015/07301734/12OmNz5apMV",
"parentPublication": {
"id": "proceedings/avss/2015/7632/0",
"title": "2015 12th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2014/4786/0/06918960",
"title": "ILPS: Indoor localization using physical maps and smartphone sensors",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2014/06918960/12OmNzC5SYr",
"parentPublication": {
"id": "proceedings/wowmom/2014/4786/0",
"title": "2014 IEEE 15th International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2014/6036/0/6036a064",
"title": "Navigating in Signal Space: A Crowd-Sourced Sensing Map Construction for Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2014/6036a064/12OmNzGDsIR",
"parentPublication": {
"id": "proceedings/mass/2014/6036/0",
"title": "2014 IEEE 11th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2015/0329/0/0329a172",
"title": "HiHeading: Smartphone-Based Indoor Map Construction System with High Accuracy Heading Inference",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2015/0329a172/12OmNzSyCbP",
"parentPublication": {
"id": "proceedings/msn/2015/0329/0",
"title": "2015 11th International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2014/4224/0/4224a465",
"title": "Smartphone Indoor Localization with Accelerometer and Gyroscope",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2014/4224a465/12OmNzTH0S7",
"parentPublication": {
"id": "proceedings/nbis/2014/4224/0",
"title": "2014 17th International Conference on Network-Based Information Systems (NBiS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isads/2015/8260/0/07098250",
"title": "A Foot-Mounted Sensor Based 3D Indoor Positioning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/isads/2015/07098250/12OmNzZmZhx",
"parentPublication": {
"id": "proceedings/isads/2015/8260/0",
"title": "2015 IEEE Twelfth International Symposium on Autonomous Decentralized System (ISADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2013/02/mpc2013020017",
"title": "Tutorial: Implementing a Pedestrian Tracker Using Inertial Sensors",
"doi": null,
"abstractUrl": "/magazine/pc/2013/02/mpc2013020017/13rRUxAASQw",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxX3uN2",
"title": "2014 International Conference on Wireless Communication and Sensor Network",
"acronym": "wcsn",
"groupId": "1849269",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx38vUn",
"doi": "10.1109/WCSN.2014.88",
"title": "A New Approach of Real Time Step Length Estimation for Waist Mounted PDR System",
"normalizedTitle": "A New Approach of Real Time Step Length Estimation for Waist Mounted PDR System",
"abstract": "Pedestrian dead reckoning (PDR) is a promising solution for indoor positioning. It can work without either GPS signals or Wi-Fi coverage. Commonly, a foot mounted PDR system is relatively more accurate, but a PDR system mounted on the waist or integrated in a Smartphone can be more convenient. However, such systems have difficulties with step length estimation. Most of them estimate step length indirectly according to acceleration features. This paper proposes a new approach of step length estimation for waist mounted PDR system which does not require detection of zero velocity point. Our approach can provide 96.9% accuracy in walking distance estimation. In addition, we also discussed the influence of mounting position of sensors to the accuracy. Finally, we applied the principle of step length estimation to state detection, distinguishing normal walking, descending or ascending. Tests show that as high as 81%, our method can correctly distinguish the person's walking status with 81% accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Pedestrian dead reckoning (PDR) is a promising solution for indoor positioning. It can work without either GPS signals or Wi-Fi coverage. Commonly, a foot mounted PDR system is relatively more accurate, but a PDR system mounted on the waist or integrated in a Smartphone can be more convenient. However, such systems have difficulties with step length estimation. Most of them estimate step length indirectly according to acceleration features. This paper proposes a new approach of step length estimation for waist mounted PDR system which does not require detection of zero velocity point. Our approach can provide 96.9% accuracy in walking distance estimation. In addition, we also discussed the influence of mounting position of sensors to the accuracy. Finally, we applied the principle of step length estimation to state detection, distinguishing normal walking, descending or ascending. Tests show that as high as 81%, our method can correctly distinguish the person's walking status with 81% accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Pedestrian dead reckoning (PDR) is a promising solution for indoor positioning. It can work without either GPS signals or Wi-Fi coverage. Commonly, a foot mounted PDR system is relatively more accurate, but a PDR system mounted on the waist or integrated in a Smartphone can be more convenient. However, such systems have difficulties with step length estimation. Most of them estimate step length indirectly according to acceleration features. This paper proposes a new approach of step length estimation for waist mounted PDR system which does not require detection of zero velocity point. Our approach can provide 96.9% accuracy in walking distance estimation. In addition, we also discussed the influence of mounting position of sensors to the accuracy. Finally, we applied the principle of step length estimation to state detection, distinguishing normal walking, descending or ascending. Tests show that as high as 81%, our method can correctly distinguish the person's walking status with 81% accuracy.",
"fno": "7091a400",
"keywords": [
"Acceleration",
"Estimation",
"Foot",
"Mathematical Model",
"Legged Locomotion",
"Accuracy",
"IEEE 802 11 Standards",
"Walking Status Detection",
"PDR System",
"Step Length Estimation"
],
"authors": [
{
"affiliation": null,
"fullName": "Kai Zhao",
"givenName": "Kai",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bing-Hao Li",
"givenName": "Bing-Hao",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Andrew G. Dempster",
"givenName": "Andrew G.",
"surname": "Dempster",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wcsn",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "400-406",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-7091-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7091a395",
"articleId": "12OmNzzxuuL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7091a407",
"articleId": "12OmNyUWRa7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1988/0852/0/00012162",
"title": "Legged robots on rough terrain: experiments in adjusting step length",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012162/12OmNBUAvYs",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2017/31/0/08330105",
"title": "Pedestrian direction estimation for each step using plane component of accelerometer",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2017/08330105/12OmNvDqsBa",
"parentPublication": {
"id": "proceedings/icmu/2017/31/0",
"title": "2017 Tenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402584",
"title": "A waist-mounted ProCam system for remote collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402584/12OmNwGZNJB",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2017/3932/0/3932a324",
"title": "Step detection algorithm for accurate distance estimation using dynamic step length",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2017/3932a324/12OmNylKAXz",
"parentPublication": {
"id": "proceedings/mdm/2017/3932/0",
"title": "2017 18th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2011/0774/0/05959591",
"title": "Collaborative PDR Localisation with Mobile Phones",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2011/05959591/12OmNzaQorl",
"parentPublication": {
"id": "proceedings/iswc/2011/0774/0",
"title": "2011 15th Annual International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466455",
"title": "A Step Towards Design and Validation of Portable, Cost-effective Device for Gait Characterization",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466455/13Jkrb8ju9i",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699289",
"title": "Walking-in-Place for VR Navigation Independent of Gaze Direction Using a Waist-Worn Inertial Measurement Unit",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699289/19F1PlWtKJa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2022/5478/0/547800a263",
"title": "Cost-efficient UWB and PDR Hybrid Pedestrian Positioning System Performing Incremental Position Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2022/547800a263/1JeDomksNO0",
"parentPublication": {
"id": "proceedings/icdh/2022/5478/0",
"title": "2022 9th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2019/2583/0/258300a937",
"title": "Heading Judgment for the Waist-Mounted MIMU Using LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2019/258300a937/1h5WqnlTzOw",
"parentPublication": {
"id": "proceedings/icpads/2019/2583/0",
"title": "2019 IEEE 25th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2019/41/0/09006673",
"title": "A PDR Smartphone Application Considering Side/Backward Steps",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2019/09006673/1hJttsCV8xa",
"parentPublication": {
"id": "proceedings/icmu/2019/41/0",
"title": "2019 Twelfth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxpAQuq8o",
"doi": "10.1109/VRW50115.2020.00176",
"title": "A Constrained Path Redirection for Passive Haptics",
"normalizedTitle": "A Constrained Path Redirection for Passive Haptics",
"abstract": "Navigation with passive haptic feedback can enhance users’ immersion in virtual environments. We propose a constrained path redirection method to provide users with corresponding haptic feedback at the right time and place. We have quantified the VR exploration practicality in a study and the results show advantages over steer-to-center method in terms of presence, and over Steinicke’s method in terms of matching errors and presence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Navigation with passive haptic feedback can enhance users’ immersion in virtual environments. We propose a constrained path redirection method to provide users with corresponding haptic feedback at the right time and place. We have quantified the VR exploration practicality in a study and the results show advantages over steer-to-center method in terms of presence, and over Steinicke’s method in terms of matching errors and presence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Navigation with passive haptic feedback can enhance users’ immersion in virtual environments. We propose a constrained path redirection method to provide users with corresponding haptic feedback at the right time and place. We have quantified the VR exploration practicality in a study and the results show advantages over steer-to-center method in terms of presence, and over Steinicke’s method in terms of matching errors and presence.",
"fno": "09090521",
"keywords": [
"Task Analysis",
"Haptic Interfaces",
"Legged Locomotion",
"Virtual Environments",
"Navigation",
"Cats",
"Virtual Reality",
"Navigation",
"Redirected Walking",
"Passive Haptics"
],
"authors": [
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems, Beijing Advanced Innovation Center for Biomedical Engineering,Beijing,China",
"fullName": "Lili Wang",
"givenName": "Lili",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems, Beijing Advanced Innovation Center for Biomedical Engineering,Beijing,China",
"fullName": "Zixìang Zhao",
"givenName": "Zixìang",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems, Beijing Advanced Innovation Center for Biomedical Engineering,Beijing,China",
"fullName": "Xuefeng Yang",
"givenName": "Xuefeng",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Auckland,Peng Cheng Laboratory,Auckland,New Zealand",
"fullName": "Huidong Bai",
"givenName": "Huidong",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Auckland,Peng Cheng Laboratory,Auckland,New Zealand",
"fullName": "Amit Barde",
"givenName": "Amit",
"surname": "Barde",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Auckland,Peng Cheng Laboratory,Auckland,New Zealand",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "650-651",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090495",
"articleId": "1jIximIpClq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090548",
"articleId": "1jIxsIo58PK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2018/04/08260962",
"title": "Ascending and Descending in Virtual Reality: Simple and Safe System Using Passive Haptics",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260962/13rRUwjGoLM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a167",
"title": "Foldable Spaces: An Overt Redirection Approach for Natural Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a167/1CJc5J6RYYM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a464",
"title": "RedirectedDoors: Redirection While Opening Doors in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a464/1CJc9xfqBSo",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a083",
"title": "Tapping with a Handheld Stick in VR: Redirection Detection Thresholds for Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a083/1CJcjWU39wQ",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a748",
"title": "Wormholes in VR: Teleporting Hands for Flexible Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a748/1JrR93EDicE",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049705",
"title": "Dynamic Redirection for VR Haptics with a Handheld Stick",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049705/1KYovqncdKo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798143",
"title": "Estimating Detection Thresholds for Desktop-Scale Hand Redirection in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798143/1cJ0GRxSQwM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089579",
"title": "Feature Guided Path Redirection for VR Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089579/1jIx7XMm676",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382898",
"title": "Combining Dynamic Passive Haptics and Haptic Retargeting for Enhanced Haptic Feedback in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382898/1saZv7Dd9Ty",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxps3cZgs",
"doi": "10.1109/VRW50115.2020.00187",
"title": "Effects of Physical Prop Shape on Virtual Stairs Travel Techniques",
"normalizedTitle": "Effects of Physical Prop Shape on Virtual Stairs Travel Techniques",
"abstract": "Experiences of Virtual Reality training and architectural virtual environments benefit when provided a higher sensation of stair climbing. Passive haptic props can add that sensation. These methods present a safe approach by placing short ramps on the floor rather than a physical staircase. To improve a user’s level of immersion, we conducted an experiment to explore the shape of physical props to change the way users were aligned and moved while traveling up or down a virtual set of stairs. We investigated three methods for physical props while ascending and descending virtual stairs. Results suggest that elongated props provide a better experience and are more preferred.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Experiences of Virtual Reality training and architectural virtual environments benefit when provided a higher sensation of stair climbing. Passive haptic props can add that sensation. These methods present a safe approach by placing short ramps on the floor rather than a physical staircase. To improve a user’s level of immersion, we conducted an experiment to explore the shape of physical props to change the way users were aligned and moved while traveling up or down a virtual set of stairs. We investigated three methods for physical props while ascending and descending virtual stairs. Results suggest that elongated props provide a better experience and are more preferred.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Experiences of Virtual Reality training and architectural virtual environments benefit when provided a higher sensation of stair climbing. Passive haptic props can add that sensation. These methods present a safe approach by placing short ramps on the floor rather than a physical staircase. To improve a user’s level of immersion, we conducted an experiment to explore the shape of physical props to change the way users were aligned and moved while traveling up or down a virtual set of stairs. We investigated three methods for physical props while ascending and descending virtual stairs. Results suggest that elongated props provide a better experience and are more preferred.",
"fno": "09090408",
"keywords": [
"Legged Locomotion",
"Three Dimensional Displays",
"Virtual Reality",
"Shape",
"Haptic Interfaces",
"Stairs"
],
"authors": [
{
"affiliation": "University of Wyoming,Interactive Realities Research Laboratory,Laramie,WY",
"fullName": "Connor Kasarda",
"givenName": "Connor",
"surname": "Kasarda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Wyoming,Interactive Realities Research Laboratory,Laramie,WY",
"fullName": "Maria Swartz",
"givenName": "Maria",
"surname": "Swartz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Wyoming,Interactive Realities Research Laboratory,Laramie,WY",
"fullName": "Kyle Mitchell",
"givenName": "Kyle",
"surname": "Mitchell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Wyoming,Interactive Realities Research Laboratory,Laramie,WY",
"fullName": "Rajiv Khadka",
"givenName": "Rajiv",
"surname": "Khadka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Wyoming,Interactive Realities Research Laboratory,Laramie,WY",
"fullName": "Amy Banić",
"givenName": "Amy",
"surname": "Banić",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "672-673",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090590",
"articleId": "1jIxhRmAxBC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090644",
"articleId": "1jIxuxGS1So",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504715",
"title": "Vestibulohaptic passive stimulation for a walking sensation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504715/12OmNxu6p8R",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260962",
"title": "Ascending and Descending in Virtual Reality: Simple and Safe System Using Passive Haptics",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260962/13rRUwjGoLM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a884",
"title": "Studying the Effect of Physical Realism on Time Perception in a HAZMAT VR Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a884/1CJeHh7xkYw",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2019/4050/0/08809587",
"title": "Leveraging Change Blindness for Haptic Remapping in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2019/08809587/1cI62p6yHYs",
"parentPublication": {
"id": "proceedings/wevr/2019/4050/0",
"title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/04/09057399",
"title": "Making the Invisible Visible: Illuminating the Hidden Histories of the World War I Tunnels at Vauquois Through a Hybridized Virtual Reality Exhibition",
"doi": null,
"abstractUrl": "/magazine/cg/2020/04/09057399/1iUHRwkxh7i",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089462",
"title": "VR Bridges: Simulating Smooth Uneven Surfaces in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089462/1jIxeZPD4LS",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a134",
"title": "Proxy Haptics for Surgical Training",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a134/1oZBAEAmMBW",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400a392",
"title": "Development and experimental study of multi-motion model robot with wheel-track compound structure",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400a392/1tzyBY0fPwI",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1zktbPUzbeE",
"title": "2021 Thirteenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"acronym": "icmu",
"groupId": "1803606",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1zktcJKr1Bu",
"doi": "10.23919/ICMU50196.2021.9638935",
"title": "Motion recognition scheme for supporting indoor PDR by partial introduction of reference data",
"normalizedTitle": "Motion recognition scheme for supporting indoor PDR by partial introduction of reference data",
"abstract": "After a disaster such as an earthquake, if evacuation route information of evacuees to the outside of a building can be recorded on their smartphones on the principle of PDR(Pedestrian Dead Reckoning) and passed to rescue staffs, it would be useful for them to reach the location of injured persons in the building. Assuming this scenario, we have proposed the motion recognition scheme of a person using a smartphone’s rotation vector sensor, an acceleration sensor and so on. The limitation of this scheme was that it needs sensor data of the person obtained in the walking state during an evacuation as reference data. In this paper, we propose a method to acquire reference data of the walking state regarding the target person in his or her daily life in advance and use it to recognize the moving state at the time of evacuation. Experimental results show that recall rates were over 80% in each motion state of walking, running, descending stairs, and ascending stairs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "After a disaster such as an earthquake, if evacuation route information of evacuees to the outside of a building can be recorded on their smartphones on the principle of PDR(Pedestrian Dead Reckoning) and passed to rescue staffs, it would be useful for them to reach the location of injured persons in the building. Assuming this scenario, we have proposed the motion recognition scheme of a person using a smartphone’s rotation vector sensor, an acceleration sensor and so on. The limitation of this scheme was that it needs sensor data of the person obtained in the walking state during an evacuation as reference data. In this paper, we propose a method to acquire reference data of the walking state regarding the target person in his or her daily life in advance and use it to recognize the moving state at the time of evacuation. Experimental results show that recall rates were over 80% in each motion state of walking, running, descending stairs, and ascending stairs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "After a disaster such as an earthquake, if evacuation route information of evacuees to the outside of a building can be recorded on their smartphones on the principle of PDR(Pedestrian Dead Reckoning) and passed to rescue staffs, it would be useful for them to reach the location of injured persons in the building. Assuming this scenario, we have proposed the motion recognition scheme of a person using a smartphone’s rotation vector sensor, an acceleration sensor and so on. The limitation of this scheme was that it needs sensor data of the person obtained in the walking state during an evacuation as reference data. In this paper, we propose a method to acquire reference data of the walking state regarding the target person in his or her daily life in advance and use it to recognize the moving state at the time of evacuation. Experimental results show that recall rates were over 80% in each motion state of walking, running, descending stairs, and ascending stairs.",
"fno": "09638935",
"keywords": [
"Disasters",
"Emergency Management",
"Gait Analysis",
"Indoor Radio",
"Pedestrians",
"Sensors",
"Smart Phones",
"Motion Recognition Scheme",
"Indoor PDR",
"Partial Introduction",
"Reference Data",
"Smartphones",
"Injured Persons",
"Smartphone",
"Acceleration Sensor",
"Sensor Data",
"Walking State",
"Target Person",
"Pedestrian Dead Reckoning",
"Disasters",
"Legged Locomotion",
"Pressure Sensors",
"Dead Reckoning",
"Target Recognition",
"Buildings",
"Earthquakes",
"Training Data",
"Motion State Estimation",
"Smartphone",
"Pedestrian Dead Reckoning",
"Rotation Vector Sensor"
],
"authors": [
{
"affiliation": "Shibaura Institute of Technology, 3-7-5 Toyosu,Graduate School of Engineering and Science,Koto-ku,Tokyo,Japan,135-8548",
"fullName": "Chisaki Takahashi",
"givenName": "Chisaki",
"surname": "Takahashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shibaura Institute of Technology, 3-7-5 Toyosu,Graduate School of Engineering and Science,Koto-ku,Tokyo,Japan,135-8548",
"fullName": "Hiroaki Morino",
"givenName": "Hiroaki",
"surname": "Morino",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmu",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-2",
"year": "2021",
"issn": null,
"isbn": "978-4-907626-48-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09638813",
"articleId": "1zktgEEwMKs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09638850",
"articleId": "1zktcndSzE4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmu/2017/31/0/08330083",
"title": "Detection of half-turn stairs from walking trajectories estimated by pedestrian dead reckoning",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2017/08330083/12OmNBSjIWK",
"parentPublication": {
"id": "proceedings/icmu/2017/31/0",
"title": "2017 Tenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2010/5329/0/05466984",
"title": "AutoGait: A mobile platform that accurately estimates the distance walked",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2010/05466984/12OmNy87Qw7",
"parentPublication": {
"id": "proceedings/percom/2010/5329/0",
"title": "2010 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460995",
"title": "Inertial-sensor-based walking action recognition using robust step detection and inter-class relationships",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460995/12OmNzC5Tr5",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2011/938/0/05766963",
"title": "An energy-efficient strategy for combined RSS-PDR indoor localization",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2011/05766963/17D45VsBU56",
"parentPublication": {
"id": "proceedings/percomw/2011/938/0",
"title": "2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2011). PerCom-Workshops 2011: 2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2018/34/0/08653585",
"title": "Partial Matching Estimation Method of Walking Trajectories for Generating Indoor Pedestrian Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2018/08653585/183rAdTELtP",
"parentPublication": {
"id": "proceedings/icmu/2018/34/0",
"title": "2018 Eleventh International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2018/34/0/08653597",
"title": "Estimating Distance of Going Up and down Stairs in a Building Using the Smartphone’s Rotation Vector Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2018/08653597/183rAfA79lv",
"parentPublication": {
"id": "proceedings/icmu/2018/34/0",
"title": "2018 Eleventh International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2021/0878/0/087800a008",
"title": "A Novel iBeacon Deployment Scheme for Indoor Pedestrian Positioning",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2021/087800a008/1D4M1oJpl1C",
"parentPublication": {
"id": "proceedings/icpads/2021/0878/0",
"title": "2021 IEEE 27th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2022/5478/0/547800a263",
"title": "Cost-efficient UWB and PDR Hybrid Pedestrian Positioning System Performing Incremental Position Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2022/547800a263/1JeDomksNO0",
"parentPublication": {
"id": "proceedings/icdh/2022/5478/0",
"title": "2022 9th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2019/41/0/09006673",
"title": "A PDR Smartphone Application Considering Side/Backward Steps",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2019/09006673/1hJttsCV8xa",
"parentPublication": {
"id": "proceedings/icmu/2019/41/0",
"title": "2019 Twelfth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a416",
"title": "Research on Indoor Location Technology based on the Fusion of WiFi and PDR",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a416/1wRIvb2bjPO",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwB2dUd",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBubORd",
"doi": "10.1109/3DUI.2016.7460053",
"title": "Combating VR sickness through subtle dynamic field-of-view modification",
"normalizedTitle": "Combating VR sickness through subtle dynamic field-of-view modification",
"abstract": "Virtual Reality (VR) sickness can cause intense discomfort, shorten the duration of a VR experience, and create an aversion to further use of VR. High-quality tracking systems can minimize the mismatch between a user's visual perception of the virtual environment (VE) and the response of their vestibular system, diminishing VR sickness for moving users. However, this does not help users who do not or cannot move physically the way they move virtually, because of preference or physical limitations such as a disability. It has been noted that decreasing field of view (FOV) tends to decrease VR sickness, though at the expense of sense of presence. To address this tradeoff, we explore the effect of dynamically, yet subtly, changing a physically stationary person's FOV in response to visually perceived motion as they virtually traverse a VE. We report the results of a two-session, multi-day study with 30 participants. Each participant was seated in a stationary chair, wearing a stereoscopic head-worn display, and used control and FOV-modifying conditions in the same VE. Our data suggests that by strategically and automatically manipulating FOV during a VR session, we can reduce the degree of VR sickness perceived by participants and help them adapt to VR, without decreasing their subjective level of presence, and minimizing their awareness of the intervention.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) sickness can cause intense discomfort, shorten the duration of a VR experience, and create an aversion to further use of VR. High-quality tracking systems can minimize the mismatch between a user's visual perception of the virtual environment (VE) and the response of their vestibular system, diminishing VR sickness for moving users. However, this does not help users who do not or cannot move physically the way they move virtually, because of preference or physical limitations such as a disability. It has been noted that decreasing field of view (FOV) tends to decrease VR sickness, though at the expense of sense of presence. To address this tradeoff, we explore the effect of dynamically, yet subtly, changing a physically stationary person's FOV in response to visually perceived motion as they virtually traverse a VE. We report the results of a two-session, multi-day study with 30 participants. Each participant was seated in a stationary chair, wearing a stereoscopic head-worn display, and used control and FOV-modifying conditions in the same VE. Our data suggests that by strategically and automatically manipulating FOV during a VR session, we can reduce the degree of VR sickness perceived by participants and help them adapt to VR, without decreasing their subjective level of presence, and minimizing their awareness of the intervention.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) sickness can cause intense discomfort, shorten the duration of a VR experience, and create an aversion to further use of VR. High-quality tracking systems can minimize the mismatch between a user's visual perception of the virtual environment (VE) and the response of their vestibular system, diminishing VR sickness for moving users. However, this does not help users who do not or cannot move physically the way they move virtually, because of preference or physical limitations such as a disability. It has been noted that decreasing field of view (FOV) tends to decrease VR sickness, though at the expense of sense of presence. To address this tradeoff, we explore the effect of dynamically, yet subtly, changing a physically stationary person's FOV in response to visually perceived motion as they virtually traverse a VE. We report the results of a two-session, multi-day study with 30 participants. Each participant was seated in a stationary chair, wearing a stereoscopic head-worn display, and used control and FOV-modifying conditions in the same VE. Our data suggests that by strategically and automatically manipulating FOV during a VR session, we can reduce the degree of VR sickness perceived by participants and help them adapt to VR, without decreasing their subjective level of presence, and minimizing their awareness of the intervention.",
"fno": "07460053",
"keywords": [
"Cameras",
"Tracking",
"Geometry",
"Virtual Environments",
"Visual Perception",
"Stereo Image Processing",
"Field Of View",
"VR Sickness",
"Cybersickness",
"Virtual Reality",
"Head Worn Display"
],
"authors": [
{
"affiliation": "Columbia University",
"fullName": "Ajoy S Fernandes",
"givenName": "Ajoy S",
"surname": "Fernandes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Columbia University",
"fullName": "Steven K. Feiner",
"givenName": "Steven K.",
"surname": "Feiner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "201-210",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0842-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07460052",
"articleId": "12OmNC4eSwS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07460054",
"articleId": "12OmNqJHFoq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2002/1492/0/14920164",
"title": "Effects of Field of View on Presence, Enjoyment, Memory, and Simulator Sickness in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920164/12OmNvUsoqB",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a075",
"title": "[POSTER] Prevention of Visually Induced Motion Sickness Based on Dynamic Real-Time Content-Aware Non-salient Area Blurring",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a075/12OmNzC5Tor",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446346",
"title": "Reducing VR Sickness Through Peripheral Visual Effects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446346/13bd1fHrlRY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a094",
"title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798213",
"title": "VR Sickness Prediction for Navigation in Immersive Virtual Environments using a Deep Long Short Term Memory Model",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798213/1cJ0RYruJIA",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798136",
"title": "VR Sickness in Continuous Exposure to Live-action 180°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089437",
"title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089437/1jIxcfT0Wt2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwx3QdZ",
"doi": "10.1109/VR.2017.7892284",
"title": "Diminished reality for acceleration stimulus: Motion sickness reduction with vection for autonomous driving",
"normalizedTitle": "Diminished reality for acceleration stimulus: Motion sickness reduction with vection for autonomous driving",
"abstract": "This paper presents an approach for motion sickness reduction while riding an autonomous vehicle. It proposes the Diminished Reality (DR) method for an acceleration stimulus to reduce motion sickness for the autonomous vehicle. One of the main causes of motion sickness is a repeated acceleration. In order to diminish the acceleration stimulus in the autonomous vehicle, vection illusion is used to induce the user to make a preliminary movement against the real acceleration. The Balance Wii Board is used to measure participant's movement of the center of gravity to verify the effectiveness of the method with vection. The experimental result of 9 participants shows that the proposed method of using vection could reduce acceleration stimulus compared with the conventional method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an approach for motion sickness reduction while riding an autonomous vehicle. It proposes the Diminished Reality (DR) method for an acceleration stimulus to reduce motion sickness for the autonomous vehicle. One of the main causes of motion sickness is a repeated acceleration. In order to diminish the acceleration stimulus in the autonomous vehicle, vection illusion is used to induce the user to make a preliminary movement against the real acceleration. The Balance Wii Board is used to measure participant's movement of the center of gravity to verify the effectiveness of the method with vection. The experimental result of 9 participants shows that the proposed method of using vection could reduce acceleration stimulus compared with the conventional method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an approach for motion sickness reduction while riding an autonomous vehicle. It proposes the Diminished Reality (DR) method for an acceleration stimulus to reduce motion sickness for the autonomous vehicle. One of the main causes of motion sickness is a repeated acceleration. In order to diminish the acceleration stimulus in the autonomous vehicle, vection illusion is used to induce the user to make a preliminary movement against the real acceleration. The Balance Wii Board is used to measure participant's movement of the center of gravity to verify the effectiveness of the method with vection. The experimental result of 9 participants shows that the proposed method of using vection could reduce acceleration stimulus compared with the conventional method.",
"fno": "07892284",
"keywords": [
"Acceleration",
"Gravity",
"Autonomous Vehicles",
"Motion Measurement",
"Virtual Reality",
"Visualization",
"H 5 1 Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Taishi Sawabe",
"givenName": "Taishi",
"surname": "Sawabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Masayuki Kanbara",
"givenName": "Masayuki",
"surname": "Kanbara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology, Advanced Telecommunications Research Institute, Japan",
"fullName": "Norihiro Hagita",
"givenName": "Norihiro",
"surname": "Hagita",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "277-278",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892283",
"articleId": "12OmNyvY9zc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892285",
"articleId": "12OmNxFaLhK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892307",
"title": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892307/12OmNClQ0yz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2018/3649/0/364901a653",
"title": "Motion Control Block Implementation for Driving Computing System",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2018/364901a653/12OmNwF0C5g",
"parentPublication": {
"id": "proceedings/bigcomp/2018/3649/0",
"title": "2018 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492799",
"title": "Virtual acceleration with galvanic vestibular stimulation in a virtual reality environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492799/12OmNwJPMZr",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836521",
"title": "Diminished Reality for Acceleration — Motion Sickness Reduction with Vection for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836521/12OmNyo1nR0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480760",
"title": "Circular, Linear, and Curvilinear Vection in a Large-screen Virtual Environment with Floor Projection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480760/12OmNzAoi4A",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08267239",
"title": "Towards a Machine-Learning Approach for Sickness Prediction in 360° Stereoscopic Videos",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08267239/13rRUyYSWt3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679433",
"title": "Hardware Acceleration Technology for Deep-Learning in Autonomous Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679433/18XkmEGooy4",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699182",
"title": "International Workshop on Comfort Intelligence with AR for Autonomous Vehicle 2018",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699182/19F1R26pH0Y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a342",
"title": "Video Generation Unconsciously Evoking Pre-Motion to Passengers in Automated Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a342/1J7WjXmrHLa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwwMf3H",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismarw",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyo1nR0",
"doi": "10.1109/ISMAR-Adjunct.2016.0100",
"title": "Diminished Reality for Acceleration — Motion Sickness Reduction with Vection for Autonomous Driving",
"normalizedTitle": "Diminished Reality for Acceleration — Motion Sickness Reduction with Vection for Autonomous Driving",
"abstract": "This paper proposes the Diminished Reality (DR) method for an acceleration stimulus to reduce motion sickness for an autonomous vehicle by presenting vection for user before the real acceleration occurs. The technology of an autonomous vehicle has been rapidly developed in all over the world. Instead of controlling vehicle by passenger themselves, the autonomous system helps them acceleration and deceleration controls and safety controls as well. However, it is predictable that the number of passengers who get motion sickness increases because they receive an unexpected acceleration stimulus for the autonomous driving.In the field of the Virtual Reality, the technology to create virtual acceleration stimulus to the passenger in the driving simulator or flight simulator have been developed. However, our approach for using pseudo to reduce the acceleration stimulus which occurs in autonomous driving to prevent the motion sickness is the opposite problem of these conventional VR researches. The real acceleration stimulus from the vehicle is reduced by presenting vection before the real acceleration occurs.In this research, we demonstrate the idea of technology that use the vection with an augmented reality system to reduce an effect of the real acceleration in the vehicle which mainly the factor for the motion sickness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes the Diminished Reality (DR) method for an acceleration stimulus to reduce motion sickness for an autonomous vehicle by presenting vection for user before the real acceleration occurs. The technology of an autonomous vehicle has been rapidly developed in all over the world. Instead of controlling vehicle by passenger themselves, the autonomous system helps them acceleration and deceleration controls and safety controls as well. However, it is predictable that the number of passengers who get motion sickness increases because they receive an unexpected acceleration stimulus for the autonomous driving.In the field of the Virtual Reality, the technology to create virtual acceleration stimulus to the passenger in the driving simulator or flight simulator have been developed. However, our approach for using pseudo to reduce the acceleration stimulus which occurs in autonomous driving to prevent the motion sickness is the opposite problem of these conventional VR researches. The real acceleration stimulus from the vehicle is reduced by presenting vection before the real acceleration occurs.In this research, we demonstrate the idea of technology that use the vection with an augmented reality system to reduce an effect of the real acceleration in the vehicle which mainly the factor for the motion sickness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes the Diminished Reality (DR) method for an acceleration stimulus to reduce motion sickness for an autonomous vehicle by presenting vection for user before the real acceleration occurs. The technology of an autonomous vehicle has been rapidly developed in all over the world. Instead of controlling vehicle by passenger themselves, the autonomous system helps them acceleration and deceleration controls and safety controls as well. However, it is predictable that the number of passengers who get motion sickness increases because they receive an unexpected acceleration stimulus for the autonomous driving.In the field of the Virtual Reality, the technology to create virtual acceleration stimulus to the passenger in the driving simulator or flight simulator have been developed. However, our approach for using pseudo to reduce the acceleration stimulus which occurs in autonomous driving to prevent the motion sickness is the opposite problem of these conventional VR researches. The real acceleration stimulus from the vehicle is reduced by presenting vection before the real acceleration occurs.In this research, we demonstrate the idea of technology that use the vection with an augmented reality system to reduce an effect of the real acceleration in the vehicle which mainly the factor for the motion sickness.",
"fno": "07836521",
"keywords": [
"Aerospace Simulation",
"Augmented Reality",
"Autonomous Aerial Vehicles",
"Augmented Reality",
"Flight Simulator",
"Driving Simulator",
"Virtual Reality",
"Autonomous Vehicle",
"Acceleration Stimulus",
"Autonomous Driving",
"Vection",
"Motion Sickness Reduction",
"Diminished Reality",
"Acceleration",
"Gravity",
"Autonomous Vehicles",
"Safety",
"Resists",
"Motion Measurement"
],
"authors": [
{
"affiliation": null,
"fullName": "Taishi Sawabe",
"givenName": "Taishi",
"surname": "Sawabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Masayuki Kanbara",
"givenName": "Masayuki",
"surname": "Kanbara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Norihiro Hagita",
"givenName": "Norihiro",
"surname": "Hagita",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "297-299",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3740-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07836520",
"articleId": "12OmNAYGlBY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07836522",
"articleId": "12OmNxETajV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isorc/2018/5847/0/584701a130",
"title": "Intrusion-Tolerant Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/isorc/2018/584701a130/12OmNARAndt",
"parentPublication": {
"id": "proceedings/isorc/2018/5847/0",
"title": "2018 IEEE 21st International Symposium on Real-Time Distributed Computing (ISORC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892284",
"title": "Diminished reality for acceleration stimulus: Motion sickness reduction with vection for autonomous driving",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892284/12OmNwx3QdZ",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480760",
"title": "Circular, Linear, and Curvilinear Vection in a Large-screen Virtual Environment with Floor Projection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480760/12OmNzAoi4A",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08267239",
"title": "Towards a Machine-Learning Approach for Sickness Prediction in 360° Stereoscopic Videos",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08267239/13rRUyYSWt3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679433",
"title": "Hardware Acceleration Technology for Deep-Learning in Autonomous Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679433/18XkmEGooy4",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699194",
"title": "Comfort Intelligence for Autonomous Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699194/19F1NbD5DMs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699182",
"title": "International Workshop on Comfort Intelligence with AR for Autonomous Vehicle 2018",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699182/19F1R26pH0Y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2019/01/08705036",
"title": "A User Study of Semi-Autonomous and Autonomous Highway Driving: An Interactive Simulation Study",
"doi": null,
"abstractUrl": "/magazine/pc/2019/01/08705036/19HKIG47q3m",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icst/2020/5778/0/09159061",
"title": "Generating Avoidable Collision Scenarios for Testing Autonomous Driving Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icst/2020/09159061/1m3oPFVKuZ2",
"parentPublication": {
"id": "proceedings/icst/2020/5778/0",
"title": "2020 IEEE 13th International Conference on Software Testing, Validation and Verification (ICST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icst/2021/6836/0/683600a295",
"title": "Targeting Patterns of Driving Characteristics in Testing Autonomous Driving Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icst/2021/683600a295/1tRPahZVpE4",
"parentPublication": {
"id": "proceedings/icst/2021/6836/0",
"title": "2021 14th IEEE Conference on Software Testing, Verification and Validation (ICST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1f3HvEJ",
"doi": "10.1109/VR.2018.8446382",
"title": "Please Don't Puke: Early Detection of Severe Motion Sickness in VR",
"normalizedTitle": "Please Don't Puke: Early Detection of Severe Motion Sickness in VR",
"abstract": "Motion sickness is a potentially debilitating side effect experienced by certain users of virtual reality systems. Unexpected results from a user study on redirected walking suggest that there is a need to quickly identify participants who have an extremely low tolerance for virtual motion manipulations and remove them from the experience. In this poster, we investigate the use of a previously introduced “fast motion sickness” measure to identify potential outliers with heightened levels of sensitivity. This work demonstrates a promising experimental methodology and suggests possible shared characteristics among users in this group.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion sickness is a potentially debilitating side effect experienced by certain users of virtual reality systems. Unexpected results from a user study on redirected walking suggest that there is a need to quickly identify participants who have an extremely low tolerance for virtual motion manipulations and remove them from the experience. In this poster, we investigate the use of a previously introduced “fast motion sickness” measure to identify potential outliers with heightened levels of sensitivity. This work demonstrates a promising experimental methodology and suggests possible shared characteristics among users in this group.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion sickness is a potentially debilitating side effect experienced by certain users of virtual reality systems. Unexpected results from a user study on redirected walking suggest that there is a need to quickly identify participants who have an extremely low tolerance for virtual motion manipulations and remove them from the experience. In this poster, we investigate the use of a previously introduced “fast motion sickness” measure to identify potential outliers with heightened levels of sensitivity. This work demonstrates a promising experimental methodology and suggests possible shared characteristics among users in this group.",
"fno": "08446382",
"keywords": [
"Human Factors",
"Virtual Reality",
"Severe Motion Sickness",
"VR",
"Potentially Debilitating Side Effect",
"Virtual Reality Systems",
"User Study",
"Redirected Walking",
"Virtual Motion Manipulations",
"Potential Outliers",
"Fast Motion Sickness Measure",
"Frequency Modulation",
"Calibration",
"Electronic Mail",
"Virtual Environments",
"Tutorials",
"Headphones",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI HCI Design And Evaluation Methods User Studies"
],
"authors": [
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Courtney Hutton",
"givenName": "Courtney",
"surname": "Hutton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lewis & Clark College",
"fullName": "Shelby Ziccardi",
"givenName": "Shelby",
"surname": "Ziccardi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Harvey Mudd College",
"fullName": "Julio Medina",
"givenName": "Julio",
"surname": "Medina",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Evan Suma Rosenbarg",
"givenName": "Evan Suma",
"surname": "Rosenbarg",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "579-580",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446311",
"articleId": "13bd1fdV4l0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446622",
"articleId": "13bd1gzWkRj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460053",
"title": "Combating VR sickness through subtle dynamic field-of-view modification",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460053/12OmNBubORd",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446210",
"title": "Visually-Induced Motion Sickness Reduction via Static and Dynamic Rest Frames",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446210/13bd1fZBGcM",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07384536",
"title": "Examining Rotation Gain in CAVE-like Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07384536/13rRUxOdD2H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08267239",
"title": "Towards a Machine-Learning Approach for Sickness Prediction in 360° Stereoscopic Videos",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08267239/13rRUyYSWt3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a094",
"title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798213",
"title": "VR Sickness Prediction for Navigation in Immersive Virtual Environments using a Deep Long Short Term Memory Model",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798213/1cJ0RYruJIA",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a178",
"title": "Redirected Tilting: Eliciting Postural Changes with a Rotational Self-Motion Illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a178/1tnWQaG0jyo",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a426",
"title": "Is Virtual Reality Sickness Elicited by Illusory Motion Affected by Gender and Prior Video Gaming Experience?",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a426/1tnXYDa4Wcg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKisA",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45W2Wyyv",
"doi": "10.1109/AIVR.2018.00032",
"title": "Machine Learning Architectures to Predict Motion Sickness Using a Virtual Reality Rollercoaster Simulation Tool",
"normalizedTitle": "Machine Learning Architectures to Predict Motion Sickness Using a Virtual Reality Rollercoaster Simulation Tool",
"abstract": "Virtual Reality (VR) can cause an unprecedented immersion and feeling of presence yet a lot of users experience motion sickness when moving through a virtual environment. Rollercoaster rides are popular in Virtual Reality but have to be well designed to limit the amount of nausea the user may feel. This paper describes a novel framework to get automated ratings on motion sickness using Neural Networks. An application that lets users create rollercoasters directly in VR, share them with other users and ride and rate them is used to gather real-time data related to the in-game behaviour of the player, the track itself and users' ratings based on a Simulator Sickness Questionnaire (SSQ) integrated into the application. Machine learning architectures based on deep neural networks are trained using this data aiming to predict motion sickness levels. While this paper focuses on rollercoasters this framework could help to rate any VR application on motion sickness and intensity that involves camera movement. A new well defined dataset is provided in this paper and the performance of the proposed architectures are evaluated in a comparative study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) can cause an unprecedented immersion and feeling of presence yet a lot of users experience motion sickness when moving through a virtual environment. Rollercoaster rides are popular in Virtual Reality but have to be well designed to limit the amount of nausea the user may feel. This paper describes a novel framework to get automated ratings on motion sickness using Neural Networks. An application that lets users create rollercoasters directly in VR, share them with other users and ride and rate them is used to gather real-time data related to the in-game behaviour of the player, the track itself and users' ratings based on a Simulator Sickness Questionnaire (SSQ) integrated into the application. Machine learning architectures based on deep neural networks are trained using this data aiming to predict motion sickness levels. While this paper focuses on rollercoasters this framework could help to rate any VR application on motion sickness and intensity that involves camera movement. A new well defined dataset is provided in this paper and the performance of the proposed architectures are evaluated in a comparative study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) can cause an unprecedented immersion and feeling of presence yet a lot of users experience motion sickness when moving through a virtual environment. Rollercoaster rides are popular in Virtual Reality but have to be well designed to limit the amount of nausea the user may feel. This paper describes a novel framework to get automated ratings on motion sickness using Neural Networks. An application that lets users create rollercoasters directly in VR, share them with other users and ride and rate them is used to gather real-time data related to the in-game behaviour of the player, the track itself and users' ratings based on a Simulator Sickness Questionnaire (SSQ) integrated into the application. Machine learning architectures based on deep neural networks are trained using this data aiming to predict motion sickness levels. While this paper focuses on rollercoasters this framework could help to rate any VR application on motion sickness and intensity that involves camera movement. A new well defined dataset is provided in this paper and the performance of the proposed architectures are evaluated in a comparative study.",
"fno": "926900a153",
"keywords": [
"Computer Simulation",
"Entertainment",
"Human Factors",
"Learning Artificial Intelligence",
"Neural Nets",
"Virtual Reality",
"Unprecedented Immersion",
"Virtual Environment",
"Rollercoaster Rides",
"Automated Ratings",
"Rollercoasters",
"Deep Neural Networks",
"Motion Sickness Levels",
"VR Application",
"Simulator Sickness Questionnaire",
"Virtual Reality Rollercoaster Simulation Tool",
"SSQ",
"VR",
"Neural Networks",
"Virtual Reality",
"Force",
"Biological Neural Networks",
"Databases",
"Computer Architecture",
"Games",
"Virtual Reality",
"Motion Sickness",
"Neural Networks",
"Rollercoasters"
],
"authors": [
{
"affiliation": null,
"fullName": "Stefan Hell",
"givenName": "Stefan",
"surname": "Hell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Vasileios Argyriou",
"givenName": "Vasileios",
"surname": "Argyriou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "153-156",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9269-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "926900a149",
"articleId": "17D45XwUAH2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "926900a157",
"articleId": "17D45WZZ7Fb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892307",
"title": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892307/12OmNClQ0yz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a542",
"title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798297",
"title": "Unifying Research to Address Motion Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798297/1cJ13JSUePK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798158",
"title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412423",
"title": "VR Sickness Assessment with Perception Prior and Hybrid Temporal Features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412423/1tmiMP82mre",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a426",
"title": "Is Virtual Reality Sickness Elicited by Illusory Motion Affected by Gender and Prior Video Gaming Experience?",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a426/1tnXYDa4Wcg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0GMB2sV2",
"doi": "10.1109/VR.2019.8798291",
"title": "Assessing Media QoE, Simulator Sickness and Presence for Omnidirectional Videos with Different Test Protocols",
"normalizedTitle": "Assessing Media QoE, Simulator Sickness and Presence for Omnidirectional Videos with Different Test Protocols",
"abstract": "QoE for omnidirectional videos comprises additional components such as simulator sickness and presence. In this paper, a series of tests is presented comparing different test protocols to assess integral quality, simulator sickness and presence for omnidirectional videos in one test run, using the HTC Vive Pro as head-mounted display. For quality ratings, the five-point ACR scale was used. In addition, the well-established Simulator Sickness Questionnaire and Presence Questionnaire methods were used, once in a full version, and once with only one single integral scale, to analyze how well presence and simulator sickness can be captured using only a single scale.",
"abstracts": [
{
"abstractType": "Regular",
"content": "QoE for omnidirectional videos comprises additional components such as simulator sickness and presence. In this paper, a series of tests is presented comparing different test protocols to assess integral quality, simulator sickness and presence for omnidirectional videos in one test run, using the HTC Vive Pro as head-mounted display. For quality ratings, the five-point ACR scale was used. In addition, the well-established Simulator Sickness Questionnaire and Presence Questionnaire methods were used, once in a full version, and once with only one single integral scale, to analyze how well presence and simulator sickness can be captured using only a single scale.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "QoE for omnidirectional videos comprises additional components such as simulator sickness and presence. In this paper, a series of tests is presented comparing different test protocols to assess integral quality, simulator sickness and presence for omnidirectional videos in one test run, using the HTC Vive Pro as head-mounted display. For quality ratings, the five-point ACR scale was used. In addition, the well-established Simulator Sickness Questionnaire and Presence Questionnaire methods were used, once in a full version, and once with only one single integral scale, to analyze how well presence and simulator sickness can be captured using only a single scale.",
"fno": "08798291",
"keywords": [
"Helmet Mounted Displays",
"Human Factors",
"Virtual Reality",
"Media Qo E",
"Omnidirectional Videos",
"Simulator Sickness Questionnaire",
"Presence Questionnaire Methods",
"Integral Quality",
"HTC Vive Pro",
"Head Mounted Display",
"Videos",
"Video Sequences",
"Quality Of Experience",
"Correlation",
"Protocols",
"Media",
"Head Mounted Displays",
"Simulator Sickness",
"Presence",
"Qo E",
"360 X 00 B 0 Videos"
],
"authors": [
{
"affiliation": "Audiovisual Technology Group (AVT), Technical University of Ilmenau, Germany",
"fullName": "Ashutosh Singla",
"givenName": "Ashutosh",
"surname": "Singla",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audiovisual Technology Group (AVT), Technical University of Ilmenau, Germany",
"fullName": "Rakesh Rao Ramachandra Rao",
"givenName": "Rakesh Rao Ramachandra",
"surname": "Rao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audiovisual Technology Group (AVT), Technical University of Ilmenau, Germany",
"fullName": "Steve Göring",
"givenName": "Steve",
"surname": "Göring",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audiovisual Technology Group (AVT), Technical University of Ilmenau, Germany",
"fullName": "Alexander Raake",
"givenName": "Alexander",
"surname": "Raake",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1163-1164",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798342",
"articleId": "1cJ0OpjGOkw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798193",
"articleId": "1cJ0USQZTmU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2002/1492/0/14920164",
"title": "Effects of Field of View on Presence, Enjoyment, Memory, and Simulator Sickness in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920164/12OmNvUsoqB",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/03/v0422",
"title": "Demand Characteristics in Assessing Motion Sickness in a Virtual Environment: Or Does Taking a Motion Sickness Questionnaire Make You Sick?",
"doi": null,
"abstractUrl": "/journal/tg/2007/03/v0422/13rRUxASuht",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a542",
"title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08798880",
"title": "Sick Moves! Motion Parameters as Indicators of Simulator Sickness",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090670",
"title": "SiSiMo: Towards Simulator Sickness Modeling for 360<sup>°</sup> Videos Viewed with an HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090670/1jIxwAw9Z9C",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090456",
"title": "On the Effect of Standing and Seated Viewing of 360° Videos on Subjective Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090456/1jIxyayiDp6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09124686",
"title": "Stimulus Sampling With 360-Videos: Examining Head Movements, Arousal, Presence, Simulator Sickness, and Preference on a Large Sample of Participants and Videos",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09124686/1kVbwGkgqYg",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a198",
"title": "Assessment of the Simulator Sickness Questionnaire for Omnidirectional Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a198/1tuB40QFm92",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxwAw9Z9C",
"doi": "10.1109/VRW50115.2020.00142",
"title": "SiSiMo: Towards Simulator Sickness Modeling for 360<sup>°</sup> Videos Viewed with an HMD",
"normalizedTitle": "SiSiMo: Towards Simulator Sickness Modeling for 360° Videos Viewed with an HMD",
"abstract": "Users may experience symptoms of simulator sickness while watching 360<sup>°</sup> /VR videos with Head-Mounted Displays (HMDs). At present, practically no solution exists that can efficiently eradicate the symptoms of simulator sickness from virtual environments. Therefore, in the absence of a solution, it is required to at least quantify the amount of sickness. In this paper, we present initial work on our Simulator Sickness Model SiSiMo including a first component to predict simulator sickness scores over time. Using linear regression of short term scores already shows promising performance for predicting the scores collected from a number of user tests.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Users may experience symptoms of simulator sickness while watching 360<sup>°</sup> /VR videos with Head-Mounted Displays (HMDs). At present, practically no solution exists that can efficiently eradicate the symptoms of simulator sickness from virtual environments. Therefore, in the absence of a solution, it is required to at least quantify the amount of sickness. In this paper, we present initial work on our Simulator Sickness Model SiSiMo including a first component to predict simulator sickness scores over time. Using linear regression of short term scores already shows promising performance for predicting the scores collected from a number of user tests.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Users may experience symptoms of simulator sickness while watching 360° /VR videos with Head-Mounted Displays (HMDs). At present, practically no solution exists that can efficiently eradicate the symptoms of simulator sickness from virtual environments. Therefore, in the absence of a solution, it is required to at least quantify the amount of sickness. In this paper, we present initial work on our Simulator Sickness Model SiSiMo including a first component to predict simulator sickness scores over time. Using linear regression of short term scores already shows promising performance for predicting the scores collected from a number of user tests.",
"fno": "09090670",
"keywords": [
"Videos",
"Resists",
"Solid Modeling",
"Indexes",
"Video Sequences",
"Predictive Models",
"Conferences",
"Cybersickness",
"360 X 00 B 0 C",
"Video",
"Sickness Predictor"
],
"authors": [
{
"affiliation": "Audiovisual Technology Group,TU Ilmenau,Germany",
"fullName": "Alexander Raake",
"givenName": "Alexander",
"surname": "Raake",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audiovisual Technology Group,TU Ilmenau,Germany",
"fullName": "Ashutosh Singla",
"givenName": "Ashutosh",
"surname": "Singla",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audiovisual Technology Group,TU Ilmenau,Germany",
"fullName": "Rakesh Rao Ramachandra Rao",
"givenName": "Rakesh Rao Ramachandra",
"surname": "Rao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audiovisual Technology Group,TU Ilmenau,Germany",
"fullName": "Werner Robitza",
"givenName": "Werner",
"surname": "Robitza",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audiovisual Technology Group,TU Ilmenau,Germany",
"fullName": "Frank Hofmeyer",
"givenName": "Frank",
"surname": "Hofmeyer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "582-583",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090630",
"articleId": "1jIxtbZL30Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090628",
"articleId": "1jIxpR1CPOo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04811037",
"title": "Real Walking Increases Simulator Sickness in Navigationally Complex Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811037/12OmNAoDilQ",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836521",
"title": "Diminished Reality for Acceleration — Motion Sickness Reduction with Vection for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836521/12OmNyo1nR0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798291",
"title": "Assessing Media QoE, Simulator Sickness and Presence for Omnidirectional Videos with Different Test Protocols",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798291/1cJ0GMB2sV2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798158",
"title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798136",
"title": "VR Sickness in Continuous Exposure to Live-action 180°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090456",
"title": "On the Effect of Standing and Seated Viewing of 360° Videos on Subjective Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090456/1jIxyayiDp6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09551731",
"title": "Learning from Deep Stereoscopic Attention for Simulator Sickness Prediction",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09551731/1xgx3DIeexq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvkYx8t",
"title": "2011 44th Hawaii International Conference on System Sciences",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAsBFG4",
"doi": "10.1109/HICSS.2011.329",
"title": "Online Identity Construction and Expectation of Future Interaction",
"normalizedTitle": "Online Identity Construction and Expectation of Future Interaction",
"abstract": "While the growing popularity of social network sites (SNSs) reflects a desire for individuals to move their offline networks to an online space, there are a number of organizational and social settings in which online interactions precede offline meetings. When this happens, interactants may only have limited information about their partners on which to make judgments. Avatars can provide important cues to a person's identity, such as likes, appearance, or personality. The present study tests whether the type of anticipated future interaction (i.e., online or face-to-face) moderates the avatar creation process. Findings from an experiment indicate that any expectation of future interaction impacts attractiveness and similarity ratings of an avatar in comparison to the self, while text- based avatar descriptions vary according to the modality of expected future interaction. Results are discussed as they apply to hyperpersonal model of computer-mediated communication [3].",
"abstracts": [
{
"abstractType": "Regular",
"content": "While the growing popularity of social network sites (SNSs) reflects a desire for individuals to move their offline networks to an online space, there are a number of organizational and social settings in which online interactions precede offline meetings. When this happens, interactants may only have limited information about their partners on which to make judgments. Avatars can provide important cues to a person's identity, such as likes, appearance, or personality. The present study tests whether the type of anticipated future interaction (i.e., online or face-to-face) moderates the avatar creation process. Findings from an experiment indicate that any expectation of future interaction impacts attractiveness and similarity ratings of an avatar in comparison to the self, while text- based avatar descriptions vary according to the modality of expected future interaction. Results are discussed as they apply to hyperpersonal model of computer-mediated communication [3].",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While the growing popularity of social network sites (SNSs) reflects a desire for individuals to move their offline networks to an online space, there are a number of organizational and social settings in which online interactions precede offline meetings. When this happens, interactants may only have limited information about their partners on which to make judgments. Avatars can provide important cues to a person's identity, such as likes, appearance, or personality. The present study tests whether the type of anticipated future interaction (i.e., online or face-to-face) moderates the avatar creation process. Findings from an experiment indicate that any expectation of future interaction impacts attractiveness and similarity ratings of an avatar in comparison to the self, while text- based avatar descriptions vary according to the modality of expected future interaction. Results are discussed as they apply to hyperpersonal model of computer-mediated communication [3].",
"fno": "05718718",
"keywords": [
"Avatars",
"Human Computer Interaction",
"Social Networking Online",
"Online Identity Construction",
"Social Network Sites",
"Online Interaction",
"Avatar Creation Process",
"Computer Mediated Communication",
"Text Based Avatar Description",
"Avatars",
"Face",
"Particle Measurements",
"Color",
"Atmospheric Measurements",
"Hair",
"Distance Measurement"
],
"authors": [
{
"affiliation": null,
"fullName": "Caitlin McLaughlin",
"givenName": "Caitlin",
"surname": "McLaughlin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jessica Vitak",
"givenName": "Jessica",
"surname": "Vitak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Julia Crouse",
"givenName": "Julia",
"surname": "Crouse",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2011-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1-10",
"year": "2011",
"issn": "1530-1605",
"isbn": "978-1-4244-9618-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05718717",
"articleId": "12OmNypIYEZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05718719",
"articleId": "12OmNCxtyKU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892272",
"title": "The effect of lip and arm synchronization on embodiment: A pilot study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892272/12OmNBqdr3B",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223377",
"title": "Avatar embodiment realism and virtual fitness training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223369",
"title": "Human-avatar interaction and recognition memory according to interaction types and methods",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223369/12OmNvEQsfz",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344688",
"title": "Definitions of engagement in human-agent interaction",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344688/12OmNwEJ0Pk",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504761",
"title": "Avatar realism and social interaction quality in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260949",
"title": "The Effect of Gender Body-Swap Illusions on Working Memory and Stereotype Threat",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260949/13rRUynHujg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a204",
"title": "Social Interaction in Virtual Shopping",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a204/1A3j9ceXwC4",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09105074",
"title": "Virtual Co-Embodiment: Evaluation of the Sense of Agency While Sharing the Control of a Virtual Body Among Two Individuals",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09105074/1kj0SvEe6ly",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqGA5im",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvk7JLh",
"doi": "10.1109/FIE.2014.7044208",
"title": "Special session — \"Stereotype threat\" and my students: What can I do about it?",
"normalizedTitle": "Special session — \"Stereotype threat\" and my students: What can I do about it?",
"abstract": "Stereotype threat occurs when one is at risk of confirming a negative stereotype about a social group that one belongs to. In the academic setting, research has shown that stereotype threat contributes to the achievement gap noted in underrepresented students in engineering classrooms. Participants in this FIE special session define and discuss stereotype threat, explore interventions that research has found to successfully reduce stereotype threat, and identify one strategy to try in the classroom. Key resources are identified including research literature, websites and videos, and recommendations for intervention strategies. In addition to the literature, participants will have access to strategies developed by other participants that can be used by instructors to mitigate stereotype threats.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Stereotype threat occurs when one is at risk of confirming a negative stereotype about a social group that one belongs to. In the academic setting, research has shown that stereotype threat contributes to the achievement gap noted in underrepresented students in engineering classrooms. Participants in this FIE special session define and discuss stereotype threat, explore interventions that research has found to successfully reduce stereotype threat, and identify one strategy to try in the classroom. Key resources are identified including research literature, websites and videos, and recommendations for intervention strategies. In addition to the literature, participants will have access to strategies developed by other participants that can be used by instructors to mitigate stereotype threats.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Stereotype threat occurs when one is at risk of confirming a negative stereotype about a social group that one belongs to. In the academic setting, research has shown that stereotype threat contributes to the achievement gap noted in underrepresented students in engineering classrooms. Participants in this FIE special session define and discuss stereotype threat, explore interventions that research has found to successfully reduce stereotype threat, and identify one strategy to try in the classroom. Key resources are identified including research literature, websites and videos, and recommendations for intervention strategies. In addition to the literature, participants will have access to strategies developed by other participants that can be used by instructors to mitigate stereotype threats.",
"fno": "07044208",
"keywords": [
"Engineering Education",
"Educational Institutions",
"Physics",
"Psychology",
"Bibliographies",
"Google",
"Intervention Strategies For The Achievement Gap Introduction",
"Stereotype Threat",
"Inclusive Engineering Classrooms",
"Practical Applications In STEM Classrooms"
],
"authors": [
{
"affiliation": "Environmental Resources Engineering, Sociology, Humboldt State University, Arcata, CA, USA",
"fullName": "Elizabeth A. Eschenbach",
"givenName": "Elizabeth A.",
"surname": "Eschenbach",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Environmental Resources Engineering, Sociology, Humboldt State University, Arcata, CA, USA",
"fullName": "Mary Virnoche",
"givenName": "Mary",
"surname": "Virnoche",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Michelle Madsen Camacho, Electrical Engineering, Sociology, University of San Diego, San Diego, CAUSA",
"fullName": "Susan M. Lord",
"givenName": "Susan M.",
"surname": "Lord",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-3",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-3922-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07044207",
"articleId": "12OmNxEBz3P",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07044209",
"articleId": "12OmNqIhFXo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2012/1353/0/06462384",
"title": "Special session: Race and the idea of privilege in the engineering classroom",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2012/06462384/12OmNApLGsE",
"parentPublication": {
"id": "proceedings/fie/2012/1353/0",
"title": "2012 Frontiers in Education Conference Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2014/3922/0/07044011",
"title": "Proven practices that can reduce stereotype threat in engineering education: A literature review",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07044011/12OmNCbU2Vv",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2006/0256/0/04116956",
"title": "Panel Session - Future of FIE: Where are we and where do we want to go?",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2006/04116956/12OmNrFTr8V",
"parentPublication": {
"id": "proceedings/fie/2006/0256/0",
"title": "Proceedings. Frontiers in Education. 36th Annual Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2006/0256/0/04117286",
"title": "Classroom Border Crossings: Incorporating Feminist and Liberative Pedagogies in Your CSET Classroom",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2006/04117286/12OmNvAAtBW",
"parentPublication": {
"id": "proceedings/fie/2006/0256/0",
"title": "Proceedings. Frontiers in Education. 36th Annual Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2012/1353/0/06462439",
"title": "Special session: What do student-generated diagrams say about their understanding?: developmental trajectories of model-based reasoning in engineering students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2012/06462439/12OmNwx3QaK",
"parentPublication": {
"id": "proceedings/fie/2012/1353/0",
"title": "2012 Frontiers in Education Conference Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/spw/2014/5103/0/5103a214",
"title": "Understanding Insider Threat: A Framework for Characterising Attacks",
"doi": null,
"abstractUrl": "/proceedings-article/spw/2014/5103a214/12OmNxuo0i3",
"parentPublication": {
"id": "proceedings/spw/2014/5103/0",
"title": "2014 IEEE Security and Privacy Workshops (SPW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2013/5062/0/06657151",
"title": "Threats to Peace: Threat Perception and the Persistence or Desistance of Violent Conflict",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2013/06657151/12OmNyoSbg6",
"parentPublication": {
"id": "proceedings/eisic/2013/5062/0",
"title": "2013 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260949",
"title": "The Effect of Gender Body-Swap Illusions on Working Memory and Stereotype Threat",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260949/13rRUynHujg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658648",
"title": "Special session: Put me in coach! Developing a design playbook for instructors to help engineering students do design",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658648/18j97qhAL5u",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2022/5537/0/553700a487",
"title": "Preliminary Analysis of the Influence of the Stereotype Threat on Computer Programming",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2022/553700a487/1KOvi3RKoBG",
"parentPublication": {
"id": "proceedings/apsec/2022/5537/0",
"title": "2022 29th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCcbEdf",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzZmZBE",
"doi": "10.1109/ACII.2017.8273657",
"title": "Avatar and participant gender differences in the perception of uncanniness of virtual humans",
"normalizedTitle": "Avatar and participant gender differences in the perception of uncanniness of virtual humans",
"abstract": "The widespread use of avatars in training & simulation has expanded from entertainers to filling more serious roles. This change has emerged from the need to develop cost-effective & customizable avatars for interaction with trainees. While the use of avatars continues to expand, issues surrounding the impact of individual trainee factors on training outcomes, & how the design implications for avatars presented may interact with these factors, is not fully understood. Also, the uncanny valley has yet to be resolved, which may impair users' perception & acceptance of avatars & associated training scenarios. Gender has emerged as an important consideration when designing avatars, both in terms of gender differences in trainee perceptions, & the impact of avatars gender on these perceptions & experiences. The startle response of participants is measured to determine the participants' affective response to how pleasant the avatar is perceived, to ensure positive training outcomes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The widespread use of avatars in training & simulation has expanded from entertainers to filling more serious roles. This change has emerged from the need to develop cost-effective & customizable avatars for interaction with trainees. While the use of avatars continues to expand, issues surrounding the impact of individual trainee factors on training outcomes, & how the design implications for avatars presented may interact with these factors, is not fully understood. Also, the uncanny valley has yet to be resolved, which may impair users' perception & acceptance of avatars & associated training scenarios. Gender has emerged as an important consideration when designing avatars, both in terms of gender differences in trainee perceptions, & the impact of avatars gender on these perceptions & experiences. The startle response of participants is measured to determine the participants' affective response to how pleasant the avatar is perceived, to ensure positive training outcomes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The widespread use of avatars in training & simulation has expanded from entertainers to filling more serious roles. This change has emerged from the need to develop cost-effective & customizable avatars for interaction with trainees. While the use of avatars continues to expand, issues surrounding the impact of individual trainee factors on training outcomes, & how the design implications for avatars presented may interact with these factors, is not fully understood. Also, the uncanny valley has yet to be resolved, which may impair users' perception & acceptance of avatars & associated training scenarios. Gender has emerged as an important consideration when designing avatars, both in terms of gender differences in trainee perceptions, & the impact of avatars gender on these perceptions & experiences. The startle response of participants is measured to determine the participants' affective response to how pleasant the avatar is perceived, to ensure positive training outcomes.",
"fno": "08273657",
"keywords": [
"Avatars",
"Cognition",
"Computer Based Training",
"Gender Issues",
"Psychology",
"Participant Gender Differences",
"Individual Trainee Factors",
"Trainee Perceptions",
"Avatars Gender",
"Uncanniness Perception",
"Training Amp Simulation",
"Avatars",
"Training",
"Atmospheric Measurements",
"Particle Measurements",
"Guidelines",
"Virtual Environments",
"Electromyography"
],
"authors": [
{
"affiliation": "School of Electrical Engineering & Computing, University of Newcastle, Callaghan, Australia",
"fullName": "Jacqueline Deanna Bailey",
"givenName": "Jacqueline",
"surname": "Deanna Bailey",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "571-575",
"year": "2017",
"issn": "2156-8111",
"isbn": "978-1-5386-0563-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08273656",
"articleId": "12OmNviHKbm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08273658",
"articleId": "12OmNrNh0J8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223377",
"title": "Avatar embodiment realism and virtual fitness training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504761",
"title": "Avatar realism and social interaction quality in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2015/02/06853335",
"title": "Humans versus Computers: Impact of Emotion Expressions on People's Decision Making",
"doi": null,
"abstractUrl": "/journal/ta/2015/02/06853335/13rRUxASuz0",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260949",
"title": "The Effect of Gender Body-Swap Illusions on Working Memory and Stereotype Threat",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260949/13rRUynHujg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659175",
"title": "Do Great Minds Think Alike? : Racial/Ethnic and Gender Differences in Mindset of Undergraduate Engineering Students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659175/18j9jm5E6GI",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a057",
"title": "Visual Fidelity Effects on Expressive Self-avatar in Virtual Reality: First Impressions Matter",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a057/1CJc41zMnFC",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049655",
"title": "Measuring Interpersonal Trust towards Virtual Humans with a Virtual Maze Paradigm",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049655/1KYouwvCMBa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798122",
"title": "An Initial Investigation into Stereotypical Influences on Implicit Racial Bias and Embodied Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798122/1cJ0MR4xjWg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1KOv807mslW",
"title": "2022 29th Asia-Pacific Software Engineering Conference (APSEC)",
"acronym": "apsec",
"groupId": "1000681",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KOvi3RKoBG",
"doi": "10.1109/APSEC57359.2022.00068",
"title": "Preliminary Analysis of the Influence of the Stereotype Threat on Computer Programming",
"normalizedTitle": "Preliminary Analysis of the Influence of the Stereotype Threat on Computer Programming",
"abstract": "Background: Workforce shortage in information technology (IT) has become a pressing issue. To increase the number of female IT professionals, it is important to improve the working environments of female IT professionals. A previous study showed that gender bias is a threat to the outcomes of mathematics tests by females. This influence is called the stereotype threat. Aim: We focused on the stereotype threat to enable female IT developers to deliver their best performance. Method: Using a subjective experiment, we analyzed the relationship between the stereotype threat and the performance of computer programming developers. In addition, we analyzed the effect of eliminating the stereotype threat on programming. Result: Focusing on the time to develop a program, neither the stereotype threat nor the elimination of the threat had any noticeable effect. This result might be affected by the nature of computer programming and a gamification effect.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Background: Workforce shortage in information technology (IT) has become a pressing issue. To increase the number of female IT professionals, it is important to improve the working environments of female IT professionals. A previous study showed that gender bias is a threat to the outcomes of mathematics tests by females. This influence is called the stereotype threat. Aim: We focused on the stereotype threat to enable female IT developers to deliver their best performance. Method: Using a subjective experiment, we analyzed the relationship between the stereotype threat and the performance of computer programming developers. In addition, we analyzed the effect of eliminating the stereotype threat on programming. Result: Focusing on the time to develop a program, neither the stereotype threat nor the elimination of the threat had any noticeable effect. This result might be affected by the nature of computer programming and a gamification effect.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Background: Workforce shortage in information technology (IT) has become a pressing issue. To increase the number of female IT professionals, it is important to improve the working environments of female IT professionals. A previous study showed that gender bias is a threat to the outcomes of mathematics tests by females. This influence is called the stereotype threat. Aim: We focused on the stereotype threat to enable female IT developers to deliver their best performance. Method: Using a subjective experiment, we analyzed the relationship between the stereotype threat and the performance of computer programming developers. In addition, we analyzed the effect of eliminating the stereotype threat on programming. Result: Focusing on the time to develop a program, neither the stereotype threat nor the elimination of the threat had any noticeable effect. This result might be affected by the nature of computer programming and a gamification effect.",
"fno": "553700a487",
"keywords": [
"Computer Science Education",
"Gender Issues",
"Programming",
"Serious Games Computing",
"Computer Programming Developers",
"Gamification Effect",
"Information Technology",
"Mathematics Tests",
"Stereotype Threat",
"Atmospheric Measurements",
"Focusing",
"Pressing",
"Particle Measurements",
"Time Measurement",
"Mathematics",
"Computer Performance",
"Gender Bias",
"Developers X 2019 Performance",
"Subjective Experiment"
],
"authors": [
{
"affiliation": "Kindai University,Higashi-osaka,Japan",
"fullName": "Yuriko Takatsuka",
"givenName": "Yuriko",
"surname": "Takatsuka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kindai University,Higashi-osaka,Japan",
"fullName": "Masateru Tsunoda",
"givenName": "Masateru",
"surname": "Tsunoda",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "apsec",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-12-01T00:00:00",
"pubType": "proceedings",
"pages": "487-491",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5537-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "553700a482",
"articleId": "1KOvcMWFJuM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "553700a492",
"articleId": "1KOvfKBZGCY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2013/4892/0/4892c464",
"title": "Influence of Text and Participant Characteristics on Perceived and Actual Text Difficulty",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892c464/12OmNAGNCbQ",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2014/3922/0/07044011",
"title": "Proven practices that can reduce stereotype threat in engineering education: A literature review",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07044011/12OmNCbU2Vv",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2014/3922/0/07044208",
"title": "Special session — \"Stereotype threat\" and my students: What can I do about it?",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07044208/12OmNvk7JLh",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718431",
"title": "A Field Investigation of the Nostalgia Effect",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718431/12OmNwt5snA",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2010/6261/0/05673114",
"title": "The effect of soliciting demographic data on the performance of students on online tests",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2010/05673114/12OmNxETaoS",
"parentPublication": {
"id": "proceedings/fie/2010/6261/0",
"title": "2010 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icssi/2013/4985/0/4985a100",
"title": "Zebra Effect in Fashion Design: Challenging Consumer Stereotype on Striped Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/icssi/2013/4985a100/12OmNzUxO50",
"parentPublication": {
"id": "proceedings/icssi/2013/4985/0",
"title": "2013 Fifth International Conference on Service Science and Innovation (ICSSI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260949",
"title": "The Effect of Gender Body-Swap Illusions on Working Memory and Stereotype Threat",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260949/13rRUynHujg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpc/2022/9298/0/929800a133",
"title": "Anchoring Code Understandability Evaluations Through Task Descriptions",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2022/929800a133/1EpKLi7yfEQ",
"parentPublication": {
"id": "proceedings/icpc/2022/9298/0",
"title": "2022 IEEE/ACM 30th International Conference on Program Comprehension (ICPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a318",
"title": "Visual Interventions for Career and Life-Design: An Exploratory Experimental Study",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a318/1cMFaOudORG",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-nier/2020/7126/0/712600a009",
"title": "Threat modeling: from infancy to maturity",
"doi": null,
"abstractUrl": "/proceedings-article/icse-nier/2020/712600a009/1sDsVFA3e24",
"parentPublication": {
"id": "proceedings/icse-nier/2020/7126/0",
"title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: New Ideas and Emerging Results (ICSE-NIER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNsbGvCQ",
"title": "2016 30th International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"acronym": "waina",
"groupId": "1001766",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyiUBpb",
"doi": "10.1109/WAINA.2016.140",
"title": "Modification of Dijkstra Proximity Matrix for Diffraction and Reflection Rays",
"normalizedTitle": "Modification of Dijkstra Proximity Matrix for Diffraction and Reflection Rays",
"abstract": "In this paper is proposed a discrete ray tracing method by using Dijkstra Algorithm (DA) for electromagnetic field computation in complicated environments. The computational space is divided in terms of regularly arrayed rectangular solids with eight apex nodes for each rectangular solid. Connectivity of one node with others is allowed among twenty-six proximate nodes and the link costs between them is given by the traveling time of optical ray. The proposed DA procedures are classified into two stages. The first one treats source diffraction rays including incident rays, and the second one deals with image diffraction rays including reflection rays. Numerical examples show how the two types of diffraction rays, such as incident rays on floor or walls and reflection from there, can be traced from DA data in order to demonstrate the effectiveness of the present method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper is proposed a discrete ray tracing method by using Dijkstra Algorithm (DA) for electromagnetic field computation in complicated environments. The computational space is divided in terms of regularly arrayed rectangular solids with eight apex nodes for each rectangular solid. Connectivity of one node with others is allowed among twenty-six proximate nodes and the link costs between them is given by the traveling time of optical ray. The proposed DA procedures are classified into two stages. The first one treats source diffraction rays including incident rays, and the second one deals with image diffraction rays including reflection rays. Numerical examples show how the two types of diffraction rays, such as incident rays on floor or walls and reflection from there, can be traced from DA data in order to demonstrate the effectiveness of the present method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper is proposed a discrete ray tracing method by using Dijkstra Algorithm (DA) for electromagnetic field computation in complicated environments. The computational space is divided in terms of regularly arrayed rectangular solids with eight apex nodes for each rectangular solid. Connectivity of one node with others is allowed among twenty-six proximate nodes and the link costs between them is given by the traveling time of optical ray. The proposed DA procedures are classified into two stages. The first one treats source diffraction rays including incident rays, and the second one deals with image diffraction rays including reflection rays. Numerical examples show how the two types of diffraction rays, such as incident rays on floor or walls and reflection from there, can be traced from DA data in order to demonstrate the effectiveness of the present method.",
"fno": "2461a493",
"keywords": [
"Diffraction",
"Optical Diffraction",
"Optical Imaging",
"Three Dimensional Displays",
"Ray Tracing",
"Solids",
"Propagation",
"Dijkstra Algorithm",
"Ray Tracing",
"Discrete Ray Tracing Method",
"Diffraction"
],
"authors": [
{
"affiliation": null,
"fullName": "Kazunori Uchida",
"givenName": "Kazunori",
"surname": "Uchida",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Leonard Barolli",
"givenName": "Leonard",
"surname": "Barolli",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "waina",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "493-498",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2461-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2461a487",
"articleId": "12OmNyS6REe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2461a499",
"articleId": "12OmNyxFKh9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/waina/2017/6231/0/6231a189",
"title": "Dijkstra-Algorithm Based Ray-Tracing by Controlling Proximity Node Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2017/6231a189/12OmNAIMO6l",
"parentPublication": {
"id": "proceedings/waina/2017/6231/0",
"title": "2017 31st International Conference on Advanced Information Networking and Applications: Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2015/1775/0/1775a360",
"title": "A Discrete Ray Tracing Method Based on Dijkstra Algorithm for 3D Propagation Environments",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2015/1775a360/12OmNAndipS",
"parentPublication": {
"id": "proceedings/waina/2015/1775/0",
"title": "2015 IEEE 29th International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gtsd/2016/3638/0/3638a161",
"title": "Phase Quantitative Computation for Multi-Phase Materials by Means of X-Ray Diffraction",
"doi": null,
"abstractUrl": "/proceedings-article/gtsd/2016/3638a161/12OmNBp52GE",
"parentPublication": {
"id": "proceedings/gtsd/2016/3638/0",
"title": "2016 3rd International Conference on Green Technology and Sustainable Development (GTSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asmc/2003/7673/0/01194465",
"title": "Alignment and overlay metrology using a spectroscopic diffraction method",
"doi": null,
"abstractUrl": "/proceedings-article/asmc/2003/01194465/12OmNvA1hxz",
"parentPublication": {
"id": "proceedings/asmc/2003/7673/0",
"title": "IEEE/SEMI Advanced Semiconductor Manufacturing Conference and Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2018/5395/0/539501a082",
"title": "Dijkstra Algorithm Based Ray Tracing: A Case Study for Tunnel Structures",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2018/539501a082/12OmNwMob9J",
"parentPublication": {
"id": "proceedings/waina/2018/5395/0",
"title": "2018 32nd International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2014/4261/0/4261a662",
"title": "Obliquely Incidence Scattering by an Anisotropic Impedance Wedge: Surface Waves and the Diffraction of the Surface Waves",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2014/4261a662/12OmNxb5hul",
"parentPublication": {
"id": "proceedings/isdea/2014/4261/0",
"title": "2014 Fifth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2012/4652/0/4652a633",
"title": "On Accuracy of Discrete Ray Tracing Method in Comparison with Rigorous Solutions",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2012/4652a633/12OmNxbEtJX",
"parentPublication": {
"id": "proceedings/waina/2012/4652/0",
"title": "2012 26th International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2017/3299/0/08110105",
"title": "Efficient three-dimensional ray tracing and electromagnetic field intensity estimation algorithm for WLAN",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2017/08110105/12OmNyRPgyv",
"parentPublication": {
"id": "proceedings/ewdts/2017/3299/0",
"title": "2017 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08307458",
"title": "Diffraction Kernels for Interactive Sound Propagation in Dynamic Environments",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08307458/13rRUwh80Hk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09872132",
"title": "Towards Mixed-State Coded Diffraction Imaging",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09872132/1GhRKXiGLEA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1fZBGcT",
"doi": "10.1109/VR.2018.8446262",
"title": "Preliminary Environment Mapping for Redirected Walking",
"normalizedTitle": "Preliminary Environment Mapping for Redirected Walking",
"abstract": "Redirected walking applications allow a user to explore large virtual environments in a smaller physical space by employing so-called redirection techniques. To further improve the immersion of a virtual experience, path planner algorithms were developed which choose redirection techniques based on the current position and orientation of the user. In order to ensure a reliable performance, planning algorithms depend on accurate position tracking using an external tracking system. However, the disadvantage of such a tracking method is the time-consuming preparation of the physical environment which renders the system immobile. A possible solution to eliminate this dependency is to replace the external tracking system with a state-of-the-art inside-out tracker based on the concept of Simultaneous Localization and Mapping (SLAM). In this paper, we present an approach in which we attach a commercially available SLAM device to a head-mounted display to track the head motion of a user. From sensor recordings of the device, we construct a map of the surrounding environment for future processing in an existing path planner for redirected walking.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking applications allow a user to explore large virtual environments in a smaller physical space by employing so-called redirection techniques. To further improve the immersion of a virtual experience, path planner algorithms were developed which choose redirection techniques based on the current position and orientation of the user. In order to ensure a reliable performance, planning algorithms depend on accurate position tracking using an external tracking system. However, the disadvantage of such a tracking method is the time-consuming preparation of the physical environment which renders the system immobile. A possible solution to eliminate this dependency is to replace the external tracking system with a state-of-the-art inside-out tracker based on the concept of Simultaneous Localization and Mapping (SLAM). In this paper, we present an approach in which we attach a commercially available SLAM device to a head-mounted display to track the head motion of a user. From sensor recordings of the device, we construct a map of the surrounding environment for future processing in an existing path planner for redirected walking.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking applications allow a user to explore large virtual environments in a smaller physical space by employing so-called redirection techniques. To further improve the immersion of a virtual experience, path planner algorithms were developed which choose redirection techniques based on the current position and orientation of the user. In order to ensure a reliable performance, planning algorithms depend on accurate position tracking using an external tracking system. However, the disadvantage of such a tracking method is the time-consuming preparation of the physical environment which renders the system immobile. A possible solution to eliminate this dependency is to replace the external tracking system with a state-of-the-art inside-out tracker based on the concept of Simultaneous Localization and Mapping (SLAM). In this paper, we present an approach in which we attach a commercially available SLAM device to a head-mounted display to track the head motion of a user. From sensor recordings of the device, we construct a map of the surrounding environment for future processing in an existing path planner for redirected walking.",
"fno": "08446262",
"keywords": [
"Helmet Mounted Displays",
"Path Planning",
"SLAM Robots",
"Virtual Reality",
"Redirected Walking",
"Walking Applications",
"Virtual Environments",
"Redirection Techniques",
"Virtual Experience",
"Path Planner Algorithms",
"External Tracking System",
"Tracking Method",
"Path Planner",
"SLAM Device",
"Preliminary Environment Mapping",
"Simultaneous Localization And Mapping",
"Head Mounted Display",
"Legged Locomotion",
"Simultaneous Localization And Mapping",
"Three Dimensional Displays",
"Planning",
"Cloud Computing",
"Virtual Environments",
"Human Centered Computing Virtual Reality",
"Computing Methodologies Tracking"
],
"authors": [
{
"affiliation": "Innovation Center Virtual Reality, ETH Zurich",
"fullName": "Christian Hirt",
"givenName": "Christian",
"surname": "Hirt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Innovation Center Virtual Reality, ETH Zurich",
"fullName": "Markus Zank",
"givenName": "Markus",
"surname": "Zank",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Innovation Center Virtual Reality, ETH Zurich",
"fullName": "Andreas Kunz",
"givenName": "Andreas",
"surname": "Kunz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "573-574",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446345",
"articleId": "13bd1fZBGbI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446240",
"articleId": "13bd1fph1yO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504742",
"title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460030",
"title": "Eye tracking for locomotion prediction in redirected walking",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460030/12OmNz4SOsF",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111872",
"title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040634",
"title": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040634/13rRUx0Pqpx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715723",
"title": "Adaptive Redirection: A Context-Aware Redirected Walking Meta-Strategy",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715723/1B4hxCQXB4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049730",
"title": "Monte-Carlo Redirected Walking: Gain Selection Through Simulated Walks",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049730/1KYowitu5OM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798118",
"title": "PReWAP: Predictive Redirected Walking Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798118/1cJ0XGXV02s",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798121",
"title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859537/1h0Jm3Gvypy",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0Y99SR1K",
"doi": "10.1109/VR.2019.8797976",
"title": "Estimation of Detection Thresholds for Redirected Turning",
"normalizedTitle": "Estimation of Detection Thresholds for Redirected Turning",
"abstract": "Redirection makes it possible to walk around a vast virtual space in a limited real space while providing a natural walking sensation by applying a gain to the amount of movement in a real space. However, manipulating the walking path while keeping it and maintaining the naturalness of walking when turning at a corner cannot be achieved by the existing methods. To realize natural manipulation for turning at a corner, this study proposes novel “turning gains”, which refer to the increase in real and virtual turning degrees. The result of an experiment which aims to estimate the detection thresholds of turning gains indicated that when the turning radius is 0.5 m, discrimination is more difficult compared with the rotation gains (r = 0.0m).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirection makes it possible to walk around a vast virtual space in a limited real space while providing a natural walking sensation by applying a gain to the amount of movement in a real space. However, manipulating the walking path while keeping it and maintaining the naturalness of walking when turning at a corner cannot be achieved by the existing methods. To realize natural manipulation for turning at a corner, this study proposes novel “turning gains”, which refer to the increase in real and virtual turning degrees. The result of an experiment which aims to estimate the detection thresholds of turning gains indicated that when the turning radius is 0.5 m, discrimination is more difficult compared with the rotation gains (r = 0.0m).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirection makes it possible to walk around a vast virtual space in a limited real space while providing a natural walking sensation by applying a gain to the amount of movement in a real space. However, manipulating the walking path while keeping it and maintaining the naturalness of walking when turning at a corner cannot be achieved by the existing methods. To realize natural manipulation for turning at a corner, this study proposes novel “turning gains”, which refer to the increase in real and virtual turning degrees. The result of an experiment which aims to estimate the detection thresholds of turning gains indicated that when the turning radius is 0.5 m, discrimination is more difficult compared with the rotation gains (r = 0.0m).",
"fno": "08797976",
"keywords": [
"Virtual Reality",
"Redirected Turning",
"Natural Walking Sensation",
"Walking Path",
"Natural Manipulation",
"Turning Gains",
"Virtual Turning Degrees",
"Turning Radius",
"Rotation Gains",
"Virtual Space",
"Turning",
"Legged Locomotion",
"Resists",
"Virtual Environments",
"Three Dimensional Displays",
"User Interfaces",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Graduate School of Information Science and Technology, The University of Tokyo",
"fullName": "Junya Mizutani",
"givenName": "Junya",
"surname": "Mizutani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Information Science and Technology, The University of Tokyo",
"fullName": "Keigo Matsumoto",
"givenName": "Keigo",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Information Science and Technology, The University of Tokyo",
"fullName": "Ryohei Nagao",
"givenName": "Ryohei",
"surname": "Nagao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Information Science and Technology, The University of Tokyo",
"fullName": "Takuji Narumi",
"givenName": "Takuji",
"surname": "Narumi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Information Science and Technology, The University of Tokyo",
"fullName": "Tomohiro Tanikawa",
"givenName": "Tomohiro",
"surname": "Tanikawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Information Science and Technology, The University of Tokyo",
"fullName": "Michitaka Hirose",
"givenName": "Michitaka",
"surname": "Hirose",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1090-1091",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797726",
"articleId": "1cJ0VwwRIA0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798186",
"articleId": "1cJ0WnO3NeM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892279",
"title": "Curvature gains in redirected walking: A closer look",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549412",
"title": "Estimation of detection thresholds for acoustic based redirected walking techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446062",
"title": "Biomechanical Parameters Under Curvature Gains and Bending Gains in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446062/13bd1fKQxrR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010017",
"title": "Estimation of Detection Thresholds for Redirected Walking Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010017/13rRUxZ0o1t",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a830",
"title": "Redirected Walking in 360° Video: Effect of Environment Size on Detection Thresholds for Translation and Rotation Gains",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a830/1CJd1TReEYo",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798117",
"title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794563",
"title": "Estimation of Rotation Gain Thresholds Considering FOV, Gender, and Distractors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794563/1dNHkjixhDi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ15zHucrC",
"doi": "10.1109/VR.2019.8797989",
"title": "Redirected Jumping: Imperceptibly Manipulating Jump Motions in Virtual Reality",
"normalizedTitle": "Redirected Jumping: Imperceptibly Manipulating Jump Motions in Virtual Reality",
"abstract": "Jumping is a fundamental movement in our daily lives that is often used in many video games. However, little research has been done on jumping and its possible use as a redirection technique in virtual reality (VR). In this study we explore Redirected Jumping, a novel redirection technique which enables us to purposefully manipulate the mapping of the user's physical jumping movements (e.g., distance and direction) to movement in the virtual space, allowing richer and more active physical VR experiences within a limited tracking area. To demonstrate the possibilities afforded by Redirected Jumping, we implemented a jump detection algorithm and jumping redirection methods for three basic jumping actions (i.e., horizontal, vertical, and rotational jumps) using common VR devices. We conducted three user studies to investigate the effective manipulation ranges, and the results revealed that our methods can manipulate a user's jumping movements without his/her noticing, similar to walking.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Jumping is a fundamental movement in our daily lives that is often used in many video games. However, little research has been done on jumping and its possible use as a redirection technique in virtual reality (VR). In this study we explore Redirected Jumping, a novel redirection technique which enables us to purposefully manipulate the mapping of the user's physical jumping movements (e.g., distance and direction) to movement in the virtual space, allowing richer and more active physical VR experiences within a limited tracking area. To demonstrate the possibilities afforded by Redirected Jumping, we implemented a jump detection algorithm and jumping redirection methods for three basic jumping actions (i.e., horizontal, vertical, and rotational jumps) using common VR devices. We conducted three user studies to investigate the effective manipulation ranges, and the results revealed that our methods can manipulate a user's jumping movements without his/her noticing, similar to walking.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Jumping is a fundamental movement in our daily lives that is often used in many video games. However, little research has been done on jumping and its possible use as a redirection technique in virtual reality (VR). In this study we explore Redirected Jumping, a novel redirection technique which enables us to purposefully manipulate the mapping of the user's physical jumping movements (e.g., distance and direction) to movement in the virtual space, allowing richer and more active physical VR experiences within a limited tracking area. To demonstrate the possibilities afforded by Redirected Jumping, we implemented a jump detection algorithm and jumping redirection methods for three basic jumping actions (i.e., horizontal, vertical, and rotational jumps) using common VR devices. We conducted three user studies to investigate the effective manipulation ranges, and the results revealed that our methods can manipulate a user's jumping movements without his/her noticing, similar to walking.",
"fno": "08797989",
"keywords": [
"Virtual Reality",
"Redirected Jumping",
"Imperceptibly Manipulating Jump Motions",
"Virtual Reality",
"Fundamental Movement",
"Virtual Space",
"Jump Detection Algorithm",
"Jumping Redirection Methods",
"Basic Jumping Actions",
"Horizontal Jumps",
"Rotational Jumps",
"VR Experience",
"Vertical Jumps",
"Effective Manipulation",
"Redirection Technique",
"Legged Locomotion",
"Virtual Reality",
"Meters",
"Tracking",
"Games",
"Sports",
"Foot",
"Virtual Reality",
"Virtual Locomotion",
"Redirected Walking",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems X 2014 Artificial Augmented And Virtual Realities",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism X 2014 Virtual Reality"
],
"authors": [
{
"affiliation": "Research Institute of Electrical Communication, Tohoku University",
"fullName": "Daigo Hayashi",
"givenName": "Daigo",
"surname": "Hayashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research Institute of Electrical Communication, Tohoku University",
"fullName": "Kazuyuki Fujita",
"givenName": "Kazuyuki",
"surname": "Fujita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research Institute of Electrical Communication, Tohoku University",
"fullName": "Kazuki Takashima",
"givenName": "Kazuki",
"surname": "Takashima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "HIT Lab NZ, University of Canterbury",
"fullName": "Robert W. Lindeman",
"givenName": "Robert W.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research Institute of Electrical Communication, Tohoku University",
"fullName": "Yoshifumi Kitamura",
"givenName": "Yoshifumi",
"surname": "Kitamura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "386-394",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797994",
"articleId": "1cJ19tjOG2s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798286",
"articleId": "1cJ0PIoIPV6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446479",
"title": "Adopting the Roll Manipulation for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715723",
"title": "Adaptive Redirection: A Context-Aware Redirected Walking Meta-Strategy",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715723/1B4hxCQXB4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798118",
"title": "PReWAP: Predictive Redirected Walking Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798118/1cJ0XGXV02s",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798121",
"title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859537/1h0Jm3Gvypy",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a550",
"title": "Where are you? Influence of Redirected Walking on Audio-Visual Position Estimation of Co-Located Users",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a550/1tnWDmPDtHG",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a095",
"title": "Detection Thresholds with Joint Horizontal and Vertical Gains in Redirected Jumping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a095/1tuAwxIGXQI",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a201",
"title": "Manipulating Rotational Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a201/1yfxMXu7XhK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNylborB",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"acronym": "wevr",
"groupId": "1807824",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "1h0Jm3Gvypy",
"doi": "10.1109/WEVR.2016.7859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"normalizedTitle": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"abstract": "With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers. The toolkit's flexible architecture offers an interface that is not only easy to extend, but also complimented with a suite of simulation tools for testing and analysis. We envision the Redirected Walking Toolkit to be a common testbed for VR researchers as well as a publicly-available tool for large virtual exploration in virtual reality applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers. The toolkit's flexible architecture offers an interface that is not only easy to extend, but also complimented with a suite of simulation tools for testing and analysis. We envision the Redirected Walking Toolkit to be a common testbed for VR researchers as well as a publicly-available tool for large virtual exploration in virtual reality applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers. The toolkit's flexible architecture offers an interface that is not only easy to extend, but also complimented with a suite of simulation tools for testing and analysis. We envision the Redirected Walking Toolkit to be a common testbed for VR researchers as well as a publicly-available tool for large virtual exploration in virtual reality applications.",
"fno": "07859537",
"keywords": [
"Gait Analysis",
"Virtual Reality",
"Redirected Walking Toolkit",
"Unified Development Platform",
"Natural Walking Immersion",
"Consumer Grade Room Scale Tracking",
"Software Solution",
"Redirected Walking",
"Virtual Reality Configurations",
"Legged Locomotion",
"Prediction Algorithms",
"Safety",
"Virtual Environments",
"Trajectory",
"Tracking"
],
"authors": [
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Mahdi Azmandian",
"givenName": "Mahdi",
"surname": "Azmandian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Timofey Grechkin",
"givenName": "Timofey",
"surname": "Grechkin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC School of Cinematic Arts",
"fullName": "Mark Bolas",
"givenName": "Mark",
"surname": "Bolas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Evan Suma",
"givenName": "Evan",
"surname": "Suma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wevr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "9-14",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0840-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07859536",
"articleId": "12OmNx4yvxI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07859538",
"articleId": "12OmNyL0TJW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460032",
"title": "Automated path prediction for redirected walking using navigation meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549395",
"title": "Flexible and general redirected walking for head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715723",
"title": "Adaptive Redirection: A Context-Aware Redirected Walking Meta-Strategy",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715723/1B4hxCQXB4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a349",
"title": "A Redirected Walking Toolkit for Exploring Large-Scale Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a349/1BLnzoFxHHy",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0",
"title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798319",
"title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1f3HvEZ",
"doi": "10.1109/VR.2018.8446495",
"title": "Head-to-Body-Pose Classification in No-Pose VR Tracking Systems",
"normalizedTitle": "Head-to-Body-Pose Classification in No-Pose VR Tracking Systems",
"abstract": "Pose tracking does not yet reliably work in large-scale interactive multi-user VR. Our novel head orientation estimation combines a single inertial sensor located at the user's head with inaccurate positional tracking. We exploit that users tend to walk in their viewing direction and classify head and body motion to estimate heading drift. This enables low-cost long-time stable head orientation. We evaluate our method and show that we sustain immersion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Pose tracking does not yet reliably work in large-scale interactive multi-user VR. Our novel head orientation estimation combines a single inertial sensor located at the user's head with inaccurate positional tracking. We exploit that users tend to walk in their viewing direction and classify head and body motion to estimate heading drift. This enables low-cost long-time stable head orientation. We evaluate our method and show that we sustain immersion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Pose tracking does not yet reliably work in large-scale interactive multi-user VR. Our novel head orientation estimation combines a single inertial sensor located at the user's head with inaccurate positional tracking. We exploit that users tend to walk in their viewing direction and classify head and body motion to estimate heading drift. This enables low-cost long-time stable head orientation. We evaluate our method and show that we sustain immersion.",
"fno": "08446495",
"keywords": [
"Computer Vision",
"Gesture Recognition",
"Image Classification",
"Pose Estimation",
"Sensors",
"Tracking",
"Virtual Reality",
"Pose Tracking",
"Large Scale Interactive Multiuser VR",
"Novel Head Orientation Estimation",
"Single Inertial Sensor",
"Inaccurate Positional Tracking",
"Body Motion",
"Heading Drift",
"Long Time Stable Head Orientation",
"Head To Body Pose Classification",
"Head Classification",
"Nopose VR Tracking Systems",
"Magnetic Heads",
"Feature Extraction",
"Head",
"Tracking",
"Legged Locomotion",
"Reliability",
"Estimation",
"VR",
"Head Tracking",
"Inertial Sensor Fusion",
"Immersion",
"Large Scale",
"Machine Learning",
"Motion Sickness",
"Computing Methodologies Supervised Learning By Classification Human Centered Computing Virtual Reality"
],
"authors": [
{
"affiliation": "Erlangen-Nürnberg (FAU), Programming Systems Group Friedrich-Alexander University",
"fullName": "Tobias Feigl",
"givenName": "Tobias",
"surname": "Feigl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer IIS, Machine Learning and Information Fusion Group",
"fullName": "Christopher Mutschler",
"givenName": "Christopher",
"surname": "Mutschler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Erlangen-Nürnberg (FAU), Programming Systems Group Friedrich-Alexander University",
"fullName": "Michael Philippsen",
"givenName": "Michael",
"surname": "Philippsen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-2",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446230",
"articleId": "13bd1sx4Zt9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446488",
"articleId": "13bd1gJ1v0N",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2013/5001/0/06655775",
"title": "Correcting User's Head and Body Orientation Using a Comfort Pose Function",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2013/06655775/12OmNrIae9b",
"parentPublication": {
"id": "proceedings/svr/2013/5001/0",
"title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130456",
"title": "Appearance-based head pose estimation with scene-specific adaptation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130456/12OmNxXl5xs",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446300",
"title": "Human Compensation Strategies for Orientation Drifts",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446300/13bd1fdV4lD",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/10/07346504",
"title": "Social Grouping for Multi-Target Tracking and Head Pose Estimation in Video",
"doi": null,
"abstractUrl": "/journal/tp/2016/10/07346504/13rRUxly9fd",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/06/07254213",
"title": "A Multi-Task Learning Framework for Head Pose Estimation under Target Motion",
"doi": null,
"abstractUrl": "/journal/tp/2016/06/07254213/13rRUy0HYL3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486490",
"title": "FI-CAP: Robust Framework to Benchmark Head Pose Estimation in Challenging Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486490/14jQfOLF2bC",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taai/2018/1229/0/122900a046",
"title": "Successive Human Tracking and Posture Estimation with Multiple Omnidirectional Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2018/122900a046/17D45XoXP4l",
"parentPublication": {
"id": "proceedings/taai/2018/1229/0",
"title": "2018 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a676",
"title": "Action and Intention Recognition of Pedestrians in Urban Traffic",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a676/19RSwQ4eq9q",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VW8brT",
"doi": "10.1109/CVPR.2018.00559",
"title": "Gaze Prediction in Dynamic 360° Immersive Videos",
"normalizedTitle": "Gaze Prediction in Dynamic 360° Immersive Videos",
"abstract": "This paper explores gaze prediction in dynamic 360° immersive videos, i.e., based on the history scan path and VR contents, we predict where a viewer will look at an upcoming time. To tackle this problem, we first present the large-scale eye-tracking in dynamic VR scene dataset. Our dataset contains 208 360° videos captured in dynamic scenes, and each video is viewed by at least 31 subjects. Our analysis shows that gaze prediction depends on its history scan path and image contents. In terms of the image contents, those salient objects easily attract viewers' attention. On the one hand, the saliency is related to both appearance and motion of the objects. Considering that the saliency measured at different scales is different, we propose to compute saliency maps at different spatial scales: the sub-image patch centered at current gaze point, the sub-image corresponding to the Field of View (FoV), and the panorama image. Then we feed both the saliency maps and the corresponding images into a Convolutional Neural Network (CNN) for feature extraction. Meanwhile, we also use a Long-Short-Term-Memory (LSTM) to encode the history scan path. Then we combine the CNN features and LSTM features for gaze displacement prediction between gaze point at a current time and gaze point at an upcoming time. Extensive experiments validate the effectiveness of our method for gaze prediction in dynamic VR scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper explores gaze prediction in dynamic 360° immersive videos, i.e., based on the history scan path and VR contents, we predict where a viewer will look at an upcoming time. To tackle this problem, we first present the large-scale eye-tracking in dynamic VR scene dataset. Our dataset contains 208 360° videos captured in dynamic scenes, and each video is viewed by at least 31 subjects. Our analysis shows that gaze prediction depends on its history scan path and image contents. In terms of the image contents, those salient objects easily attract viewers' attention. On the one hand, the saliency is related to both appearance and motion of the objects. Considering that the saliency measured at different scales is different, we propose to compute saliency maps at different spatial scales: the sub-image patch centered at current gaze point, the sub-image corresponding to the Field of View (FoV), and the panorama image. Then we feed both the saliency maps and the corresponding images into a Convolutional Neural Network (CNN) for feature extraction. Meanwhile, we also use a Long-Short-Term-Memory (LSTM) to encode the history scan path. Then we combine the CNN features and LSTM features for gaze displacement prediction between gaze point at a current time and gaze point at an upcoming time. Extensive experiments validate the effectiveness of our method for gaze prediction in dynamic VR scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper explores gaze prediction in dynamic 360° immersive videos, i.e., based on the history scan path and VR contents, we predict where a viewer will look at an upcoming time. To tackle this problem, we first present the large-scale eye-tracking in dynamic VR scene dataset. Our dataset contains 208 360° videos captured in dynamic scenes, and each video is viewed by at least 31 subjects. Our analysis shows that gaze prediction depends on its history scan path and image contents. In terms of the image contents, those salient objects easily attract viewers' attention. On the one hand, the saliency is related to both appearance and motion of the objects. Considering that the saliency measured at different scales is different, we propose to compute saliency maps at different spatial scales: the sub-image patch centered at current gaze point, the sub-image corresponding to the Field of View (FoV), and the panorama image. Then we feed both the saliency maps and the corresponding images into a Convolutional Neural Network (CNN) for feature extraction. Meanwhile, we also use a Long-Short-Term-Memory (LSTM) to encode the history scan path. Then we combine the CNN features and LSTM features for gaze displacement prediction between gaze point at a current time and gaze point at an upcoming time. Extensive experiments validate the effectiveness of our method for gaze prediction in dynamic VR scenes.",
"fno": "642000f333",
"keywords": [
"Convolutional Neural Nets",
"Eye",
"Feature Extraction",
"Object Detection",
"Video Signal Processing",
"Virtual Reality",
"Gaze Prediction",
"Large Scale Eye Tracking",
"Dynamic VR Scene Dataset",
"Panorama Image",
"Gaze Displacement Prediction",
"Convolutional Neural Network",
"Long Short Term Memory",
"Videos",
"Gaze Tracking",
"Resists",
"Task Analysis",
"Saliency Detection",
"Games",
"Cameras"
],
"authors": [
{
"affiliation": null,
"fullName": "Yanyu Xu",
"givenName": "Yanyu",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yanbing Dong",
"givenName": "Yanbing",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Junru Wu",
"givenName": "Junru",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhengzhong Sun",
"givenName": "Zhengzhong",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhiru Shi",
"givenName": "Zhiru",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jingyi Yu",
"givenName": "Jingyi",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shenghua Gao",
"givenName": "Shenghua",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "5333-5342",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000f323",
"articleId": "17D45WODaoV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000f343",
"articleId": "17D45WHONp0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446215",
"title": "Gaze Guidance in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446215/13bd1gJ1v0y",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486537",
"title": "A Subjective Study of Viewer Navigation Behaviors When Watching 360-Degree Videos on Computers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486537/14jQfTvagGm",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a832",
"title": "GazeDock: Gaze-Only Menu Selection in Virtual Reality using Auto-Triggering Peripheral Menu",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a832/1CJbR6qnKdW",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09802919",
"title": "Continuous Gaze Tracking With Implicit Saliency-Aware Calibration on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09802919/1Eo1vvDggH6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a787",
"title": "VRDoc: Gaze-based Interactions for VR Reading Experience",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10038574",
"title": "Scanpath Prediction on Information Visualisations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10038574/1KxPXyC69b2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998375",
"title": "DGaze: CNN-Based Gaze Prediction in Dynamic Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998375/1hpPBdSWXTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090417",
"title": "Gaze Analysis and Prediction in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090417/1jIxqNN9Xqw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09497715",
"title": "Spherical DNNs and Their Applications in 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> Images and Videos",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09497715/1vzY9kuYnwA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdaD5K7Vm",
"doi": "10.1109/VRW55335.2022.00223",
"title": "Who do you look like? - Gaze-based authentication for workers in VR",
"normalizedTitle": "Who do you look like? - Gaze-based authentication for workers in VR",
"abstract": "Behavior-based authentication methods are actively being developed for XR. In particular, gaze-based methods promise continuous au-thentication of remote users. However, gaze behavior depends on the task being performed. Identification rate is typically highest when comparing data from the same task. In this study, we compared authentication performance using VR gaze data during random dot viewing, 360-degree image viewing, and a nuclear training simu-lation. We found that within-task authentication performed best for image viewing (72%). The implication for practitioners is to integrate image viewing into a VR workflow to collect gaze data that is viable for authentication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Behavior-based authentication methods are actively being developed for XR. In particular, gaze-based methods promise continuous au-thentication of remote users. However, gaze behavior depends on the task being performed. Identification rate is typically highest when comparing data from the same task. In this study, we compared authentication performance using VR gaze data during random dot viewing, 360-degree image viewing, and a nuclear training simu-lation. We found that within-task authentication performed best for image viewing (72%). The implication for practitioners is to integrate image viewing into a VR workflow to collect gaze data that is viable for authentication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Behavior-based authentication methods are actively being developed for XR. In particular, gaze-based methods promise continuous au-thentication of remote users. However, gaze behavior depends on the task being performed. Identification rate is typically highest when comparing data from the same task. In this study, we compared authentication performance using VR gaze data during random dot viewing, 360-degree image viewing, and a nuclear training simu-lation. We found that within-task authentication performed best for image viewing (72%). The implication for practitioners is to integrate image viewing into a VR workflow to collect gaze data that is viable for authentication.",
"fno": "840200a744",
"keywords": [
"Authorisation",
"Human Computer Interaction",
"Virtual Reality",
"Behavior Based Authentication Methods",
"Continuous Authentication",
"Remote Users",
"Identification Rate",
"Authentication Performance",
"VR Gaze Data",
"Random Dot Viewing",
"360 Degree Image Viewing",
"Within Task Authentication",
"VR Workflow",
"Gaze Based Authentication",
"XR",
"Nuclear Training Simulation",
"Image Viewing",
"Training",
"Three Dimensional Displays",
"Conferences",
"Authentication",
"User Interfaces",
"Task Analysis",
"X Reality",
"Eye Tracking",
"Virtual Reality",
"Authentication",
"Future Of Work",
"Human Centered Computing Systems And Tools For Interaction Design",
"Computing Methodologies Virtual Reality"
],
"authors": [
{
"affiliation": "University of Florida",
"fullName": "Karina LaRubbio",
"givenName": "Karina",
"surname": "LaRubbio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "Jeremiah Wright",
"givenName": "Jeremiah",
"surname": "Wright",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "Brendan David-John",
"givenName": "Brendan",
"surname": "David-John",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "Andreas Enqvist",
"givenName": "Andreas",
"surname": "Enqvist",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "Eakta Jain",
"givenName": "Eakta",
"surname": "Jain",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "744-745",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a742",
"articleId": "1CJfetqDtnO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a746",
"articleId": "1CJcGN8dsS4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2022/9617/0/961700a832",
"title": "GazeDock: Gaze-Only Menu Selection in Virtual Reality using Auto-Triggering Peripheral Menu",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a832/1CJbR6qnKdW",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a562",
"title": "Gaze Capture based Considerate Behaviour Control of Virtual Guiding Agent",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a562/1CJfoWhFCXm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a787",
"title": "VRDoc: Gaze-based Interactions for VR Reading Experience",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a620",
"title": "Exploring Enhancements towards Gaze Oriented Parallel Views in Immersive Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a620/1MNgG4plx6w",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798204",
"title": "Studying Gaze Behaviour during Collision Avoidance with a Virtual Walker: Influence of the Virtual Reality Setup",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798204/1cJ11rHzFi8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090417",
"title": "Gaze Analysis and Prediction in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090417/1jIxqNN9Xqw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090627",
"title": "Hand Motion with Eyes-free Interaction for Authentication in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090627/1jIxvvNRdJu",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a536",
"title": "Gaze-Pinch Menu: Performing Multiple Interactions Concurrently in Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a536/1tnXBidgc48",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a520",
"title": "Subtle Gaze Guidance for 360° Content by Gradual Brightness Modulation and Termination of Modulation by Gaze Approaching",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a520/1tnXfbb0lFK",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a140",
"title": "Using Siamese Neural Networks to Perform Cross-System Behavioral Authentication in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a140/1tuAMntNvxu",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1fHkkWQ0aEE",
"title": "2019 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1fHkoP8izEQ",
"doi": "10.1109/CW.2019.00019",
"title": "Visual Saliency Prediction in Dynamic Virtual Reality Environments Experienced with Head-Mounted Displays: An Exploratory Study",
"normalizedTitle": "Visual Saliency Prediction in Dynamic Virtual Reality Environments Experienced with Head-Mounted Displays: An Exploratory Study",
"abstract": "This work explores a set of well-studied visual saliency features through seven saliency prediction methods with the aim of assessing how applicable they are for estimating visual saliency in dynamic virtual reality (VR) environments that are experienced with head-mounted displays. An in-depth analysis of how the saliency methods that make use of depth cues compare to ones that are based on purely image-based (2D) features is presented. To this end, a user study was conducted to collect gaze data from participants as they were shown the same set of three dynamic scenes in 2D desktop viewing and 3D VR viewing using a head-mounted display. The scenes convey varying visual experiences in terms of contents and range of depth-of-field so that an extensive analysis encompassing a comprehensive array of viewing behaviors could be provided. The results indicate that 2D features matter as much as depth for both viewing conditions, yet depth cue is slightly more important for 3D VR viewing. Furthermore, including depth as an additional cue to the 2D saliency methods improves prediction for both viewing conditions, and the benefit margin is greater in 3D VR viewing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work explores a set of well-studied visual saliency features through seven saliency prediction methods with the aim of assessing how applicable they are for estimating visual saliency in dynamic virtual reality (VR) environments that are experienced with head-mounted displays. An in-depth analysis of how the saliency methods that make use of depth cues compare to ones that are based on purely image-based (2D) features is presented. To this end, a user study was conducted to collect gaze data from participants as they were shown the same set of three dynamic scenes in 2D desktop viewing and 3D VR viewing using a head-mounted display. The scenes convey varying visual experiences in terms of contents and range of depth-of-field so that an extensive analysis encompassing a comprehensive array of viewing behaviors could be provided. The results indicate that 2D features matter as much as depth for both viewing conditions, yet depth cue is slightly more important for 3D VR viewing. Furthermore, including depth as an additional cue to the 2D saliency methods improves prediction for both viewing conditions, and the benefit margin is greater in 3D VR viewing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work explores a set of well-studied visual saliency features through seven saliency prediction methods with the aim of assessing how applicable they are for estimating visual saliency in dynamic virtual reality (VR) environments that are experienced with head-mounted displays. An in-depth analysis of how the saliency methods that make use of depth cues compare to ones that are based on purely image-based (2D) features is presented. To this end, a user study was conducted to collect gaze data from participants as they were shown the same set of three dynamic scenes in 2D desktop viewing and 3D VR viewing using a head-mounted display. The scenes convey varying visual experiences in terms of contents and range of depth-of-field so that an extensive analysis encompassing a comprehensive array of viewing behaviors could be provided. The results indicate that 2D features matter as much as depth for both viewing conditions, yet depth cue is slightly more important for 3D VR viewing. Furthermore, including depth as an additional cue to the 2D saliency methods improves prediction for both viewing conditions, and the benefit margin is greater in 3D VR viewing.",
"fno": "229700a061",
"keywords": [
"Helmet Mounted Displays",
"Human Computer Interaction",
"Virtual Reality",
"Head Mounted Display",
"In Depth Analysis",
"Saliency Methods",
"Depth Cue",
"Dynamic Scenes",
"2 D Desktop Viewing",
"3 D VR Viewing",
"Visual Experiences",
"Depth Of Field",
"Viewing Conditions",
"Visual Saliency Prediction",
"Dynamic Virtual Reality Environments",
"Visual Saliency Features",
"Seven Saliency Prediction Methods",
"Visualization",
"Two Dimensional Displays",
"Three Dimensional Displays",
"Image Color Analysis",
"Feature Extraction",
"Virtual Reality",
"Solid Modeling",
"Visual Saliency",
"Virtual Reality"
],
"authors": [
{
"affiliation": "TED University",
"fullName": "Dilara Albayrak",
"givenName": "Dilara",
"surname": "Albayrak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hacettepe University",
"fullName": "Mehmet Bahadir Askin",
"givenName": "Mehmet Bahadir",
"surname": "Askin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TED University",
"fullName": "Tolga K. Capin",
"givenName": "Tolga K.",
"surname": "Capin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hacettepe University",
"fullName": "Ufuk Celikcan",
"givenName": "Ufuk",
"surname": "Celikcan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "61-68",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-2297-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "229700a057",
"articleId": "1fHkkZzKire",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "229700a069",
"articleId": "1fHkpp4xIJi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2014/4717/0/06890709",
"title": "Learning visual saliency for stereoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890709/12OmNqIhFMD",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b707",
"title": "Learning Gaze Transitions from Depth to Improve Video Saliency Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b707/12OmNwwMf0f",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836518",
"title": "AR Tabletop Interface using a Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836518/12OmNyoiYW4",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a063",
"title": "[POSTER] Reactive Displays for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a063/12OmNzaQoyU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08269807",
"title": "Saliency in VR: How Do People Explore Virtual Environments?",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08269807/13rRUxDqS8o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b420",
"title": "Cube Padding for Weakly-Supervised Saliency Prediction in 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b420/17D45WB0qcO",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08765747",
"title": "Mesh Saliency via Weakly Supervised Classification-for-Saliency CNN",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08765747/1bLyqDlC7cY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382883",
"title": "FixationNet: Forecasting Eye Fixations in Task-Oriented Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382883/1saZvDJS360",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a131",
"title": "Diegetic Tool Management in a Virtual Reality Training Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a131/1tuAgbFhYCQ",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a070",
"title": "FPX-G: First Person Exploration for Graph",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a070/1xPso0QWilO",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyr6w5YIIU",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyrLoWsF1u",
"doi": "10.1109/CVPR.2019.00998",
"title": "Learning to Explore Intrinsic Saliency for Stereoscopic Video",
"normalizedTitle": "Learning to Explore Intrinsic Saliency for Stereoscopic Video",
"abstract": "The human visual system excels at biasing the stereoscopic visual signals by the attention mechanisms. Traditional methods relying on the low-level features and depth relevant information for stereoscopic video saliency prediction have fundamental limitations. For example, it is cumbersome to model the interactions between multiple visual cues including spatial, temporal, and depth information as a result of the sophistication. In this paper, we argue that the high-level features are crucial and resort to the deep learning framework to learn the saliency map of stereoscopic videos. Driven by spatio-temporal coherence from consecutive frames, the model first imitates the mechanism of saliency by taking advantage of the 3D convolutional neural network. Subsequently, the saliency originated from the intrinsic depth is derived based on the correlations between left and right views in a data-driven manner. Finally, a Convolutional Long Short-Term Memory (Conv-LSTM) based fusion network is developed to model the instantaneous interactions between spatio-temporal and depth attributes, such that the ultimate stereoscopic saliency maps over time are produced. Moreover, we establish a new large-scale stereoscopic video saliency dataset (SVS) including 175 stereoscopic video sequences and their fixation density annotations, aiming to comprehensively study the intrinsic attributes for stereoscopic video saliency detection. Extensive experiments show that our proposed model can achieve superior performance compared to the state-of-the-art methods on the newly built dataset for stereoscopic videos.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The human visual system excels at biasing the stereoscopic visual signals by the attention mechanisms. Traditional methods relying on the low-level features and depth relevant information for stereoscopic video saliency prediction have fundamental limitations. For example, it is cumbersome to model the interactions between multiple visual cues including spatial, temporal, and depth information as a result of the sophistication. In this paper, we argue that the high-level features are crucial and resort to the deep learning framework to learn the saliency map of stereoscopic videos. Driven by spatio-temporal coherence from consecutive frames, the model first imitates the mechanism of saliency by taking advantage of the 3D convolutional neural network. Subsequently, the saliency originated from the intrinsic depth is derived based on the correlations between left and right views in a data-driven manner. Finally, a Convolutional Long Short-Term Memory (Conv-LSTM) based fusion network is developed to model the instantaneous interactions between spatio-temporal and depth attributes, such that the ultimate stereoscopic saliency maps over time are produced. Moreover, we establish a new large-scale stereoscopic video saliency dataset (SVS) including 175 stereoscopic video sequences and their fixation density annotations, aiming to comprehensively study the intrinsic attributes for stereoscopic video saliency detection. Extensive experiments show that our proposed model can achieve superior performance compared to the state-of-the-art methods on the newly built dataset for stereoscopic videos.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The human visual system excels at biasing the stereoscopic visual signals by the attention mechanisms. Traditional methods relying on the low-level features and depth relevant information for stereoscopic video saliency prediction have fundamental limitations. For example, it is cumbersome to model the interactions between multiple visual cues including spatial, temporal, and depth information as a result of the sophistication. In this paper, we argue that the high-level features are crucial and resort to the deep learning framework to learn the saliency map of stereoscopic videos. Driven by spatio-temporal coherence from consecutive frames, the model first imitates the mechanism of saliency by taking advantage of the 3D convolutional neural network. Subsequently, the saliency originated from the intrinsic depth is derived based on the correlations between left and right views in a data-driven manner. Finally, a Convolutional Long Short-Term Memory (Conv-LSTM) based fusion network is developed to model the instantaneous interactions between spatio-temporal and depth attributes, such that the ultimate stereoscopic saliency maps over time are produced. Moreover, we establish a new large-scale stereoscopic video saliency dataset (SVS) including 175 stereoscopic video sequences and their fixation density annotations, aiming to comprehensively study the intrinsic attributes for stereoscopic video saliency detection. Extensive experiments show that our proposed model can achieve superior performance compared to the state-of-the-art methods on the newly built dataset for stereoscopic videos.",
"fno": "329300j741",
"keywords": [
"Feature Extraction",
"Gaussian Processes",
"Image Representation",
"Image Sequences",
"Learning Artificial Intelligence",
"Neural Nets",
"Object Detection",
"Spatiotemporal Phenomena",
"Stereo Image Processing",
"Video Signal Processing",
"Visual Perception",
"Stereoscopic Video Saliency Prediction",
"Multiple Visual Cues",
"Spatial Depth Information",
"High Level Features",
"Deep Learning Framework",
"Saliency Map",
"Spatio Temporal Coherence",
"3 D Convolutional Neural Network",
"Intrinsic Depth",
"Depth Attributes",
"Large Scale Stereoscopic Video Saliency Dataset",
"Stereoscopic Video Sequences",
"Stereoscopic Video Saliency Detection",
"Human Visual System",
"Stereoscopic Visual Signals",
"Attention Mechanisms",
"Low Level Features",
"Depth Relevant Information",
"Intrinsic Saliency",
"Convolutional Long Short Term Memory Based Fusion Network",
"Temporal Depth Information",
"Deep Learning",
"Visualization",
"Solid Modeling",
"Three Dimensional Displays",
"Stereo Image Processing",
"Video Sequences",
"Visual Systems",
"RGBD Sensors And Analytics",
"3 D From Multiview And Sensors",
"Datasets And Evaluation",
"Deep Learning",
"Video Analytics"
],
"authors": [
{
"affiliation": "City Univ. of Hong Kong",
"fullName": "Qiudan Zhang",
"givenName": "Qiudan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen Univ.",
"fullName": "Xu Wang",
"givenName": "Xu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CityU",
"fullName": "Shiqi Wang",
"givenName": "Shiqi",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen Univ.",
"fullName": "Shikai Li",
"givenName": "Shikai",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "City University of Hong Kong",
"fullName": "Sam Kwong",
"givenName": "Sam",
"surname": "Kwong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen Univ.",
"fullName": "Jianmin Jiang",
"givenName": "Jianmin",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9741-9750",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3293-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "329300j731",
"articleId": "1gyrtLGpbxu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "329300j751",
"articleId": "1gyrXwPjPKU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2016/1552/0/07574768",
"title": "Visual attention modeling for stereoscopic video",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2016/07574768/12OmNBSBk8d",
"parentPublication": {
"id": "proceedings/icmew/2016/1552/0",
"title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890709",
"title": "Learning visual saliency for stereoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890709/12OmNqIhFMD",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b707",
"title": "Learning Gaze Transitions from Depth to Improve Video Saliency Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b707/12OmNwwMf0f",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177474",
"title": "A method to compute saliency regions in 3D video based on fusion of feature maps",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177474/12OmNy4r3SF",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/058P1B05",
"title": "Leveraging stereopsis for saliency analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/058P1B05/12OmNy50g5B",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csa/2015/9961/0/9961a006",
"title": "Learning Stereoscopic Visual Attention Model for 3D Video",
"doi": null,
"abstractUrl": "/proceedings-article/csa/2015/9961a006/12OmNyKJiyr",
"parentPublication": {
"id": "proceedings/csa/2015/9961/0",
"title": "2015 International Conference on Computer Science and Applications (CSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2012/4875/0/4875a169",
"title": "Visualizing the Perceived Discomfort of Stereoscopic Video",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2012/4875a169/12OmNz4SOrX",
"parentPublication": {
"id": "proceedings/ism/2012/4875/0",
"title": "2012 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/04/v0686",
"title": "Stereoscopic Video Synthesis from a Monocular Video",
"doi": null,
"abstractUrl": "/journal/tg/2007/04/v0686/13rRUwcAqq7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08269807",
"title": "Saliency in VR: How Do People Explore Virtual Environments?",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08269807/13rRUxDqS8o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b655",
"title": "Warping-Based Stereoscopic 3D Video Retargeting With Depth Remapping",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b655/18j8LvV2AJG",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1i5mkDyiIUg",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1i5mshouNby",
"doi": "10.1109/ICCVW.2019.00148",
"title": "SalGaze: Personalizing Gaze Estimation using Visual Saliency",
"normalizedTitle": "SalGaze: Personalizing Gaze Estimation using Visual Saliency",
"abstract": "Traditional gaze estimation methods typically require explicit user calibration to achieve high accuracy. This process is cumbersome and recalibration is often required when there are changes in factors such as illumination and pose. To address this challenge, we introduce SalGaze, a framework that utilizes saliency information in the visual content to transparently adapt the gaze estimation algorithm to the user without explicit user calibration. We design an algorithm to transform a saliency map into a differentiable loss map that can be used for the optimization of CNN-based models. SalGaze is also able to greatly augment standard point calibration data with implicit video saliency calibration data using a unified framework. We show accuracy improvements over 24% using our technique on existing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Traditional gaze estimation methods typically require explicit user calibration to achieve high accuracy. This process is cumbersome and recalibration is often required when there are changes in factors such as illumination and pose. To address this challenge, we introduce SalGaze, a framework that utilizes saliency information in the visual content to transparently adapt the gaze estimation algorithm to the user without explicit user calibration. We design an algorithm to transform a saliency map into a differentiable loss map that can be used for the optimization of CNN-based models. SalGaze is also able to greatly augment standard point calibration data with implicit video saliency calibration data using a unified framework. We show accuracy improvements over 24% using our technique on existing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Traditional gaze estimation methods typically require explicit user calibration to achieve high accuracy. This process is cumbersome and recalibration is often required when there are changes in factors such as illumination and pose. To address this challenge, we introduce SalGaze, a framework that utilizes saliency information in the visual content to transparently adapt the gaze estimation algorithm to the user without explicit user calibration. We design an algorithm to transform a saliency map into a differentiable loss map that can be used for the optimization of CNN-based models. SalGaze is also able to greatly augment standard point calibration data with implicit video saliency calibration data using a unified framework. We show accuracy improvements over 24% using our technique on existing methods.",
"fno": "502300b169",
"keywords": [
"Convolutional Neural Nets",
"Gaze Tracking",
"Optimisation",
"Pose Estimation",
"Video Signal Processing",
"Sal Gaze",
"Visual Saliency",
"Explicit User Calibration",
"Saliency Information",
"Visual Content",
"Gaze Estimation Algorithm",
"Saliency Map",
"Standard Point Calibration Data",
"CNN",
"Estimation",
"Calibration",
"Visualization",
"Machine Learning",
"Head",
"Computational Modeling",
"Lighting",
"Gaze Estimation",
"Saliency",
"Deep Learning",
"Convolutional Neural Network",
"Calibration"
],
"authors": [
{
"affiliation": "Duke University",
"fullName": "Zhuoqing Chang",
"givenName": "Zhuoqing",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Duke University",
"fullName": "J. Matias Di Martino",
"givenName": "J. Matias",
"surname": "Di Martino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Duke University",
"fullName": "Qiang Qiu",
"givenName": "Qiang",
"surname": "Qiu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Duke University",
"fullName": "Steven Espinosa",
"givenName": "Steven",
"surname": "Espinosa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Duke University",
"fullName": "Guillermo Sapiro",
"givenName": "Guillermo",
"surname": "Sapiro",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1169-1178",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5023-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "502300b159",
"articleId": "1i5mK4Vexqg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "502300b179",
"articleId": "1i5mNupOr8A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890322",
"title": "Realtime gaze estimation with online calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890322/12OmNvjyxUU",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771469",
"title": "Constraint-based gaze estimation without active calibration",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771469/12OmNwdbVch",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05539984",
"title": "Calibration-free gaze sensing using saliency maps",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05539984/12OmNwfsI2l",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/02/ttp2013020329",
"title": "Appearance-Based Gaze Estimation Using Visual Saliency",
"doi": null,
"abstractUrl": "/journal/tp/2013/02/ttp2013020329/13rRUyfKIEn",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlke/2022/9567/0/956700a013",
"title": "A New Automatic User Calibration Algorithm for Three-Dimensional Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/mlke/2022/956700a013/1CY7YuAusPC",
"parentPublication": {
"id": "proceedings/mlke/2022/9567/0",
"title": "2022 International Conference on Machine Learning and Knowledge Engineering (MLKE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09802919",
"title": "Continuous Gaze Tracking With Implicit Saliency-Aware Calibration on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09802919/1Eo1vvDggH6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/03/08920005",
"title": "A Differential Approach for Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2021/03/08920005/1fsFnejO2IM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300j367",
"title": "Few-Shot Adaptive Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300j367/1hVlzqYU93y",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093419",
"title": "Offset Calibration for Appearance-Based Gaze Estimation via Gaze Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093419/1jPbibCw0gw",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3njRPpHdC",
"doi": "10.1109/CVPR42600.2020.00453",
"title": "How Much Time Do You Have? Modeling Multi-Duration Saliency",
"normalizedTitle": "How Much Time Do You Have? Modeling Multi-Duration Saliency",
"abstract": "What jumps out in a single glance of an image is different than what you might notice after closer inspection. Yet conventional models of visual saliency produce predictions at an arbitrary, fixed viewing duration, offering a limited view of the rich interactions between image content and gaze location. In this paper we propose to capture gaze as a series of snapshots, by generating population-level saliency heatmaps for multiple viewing durations. We collect the CodeCharts1K dataset, which contains multiple distinct heatmaps per image corresponding to 0.5, 3, and 5 seconds of free-viewing. We develop an LSTM-based model of saliency that simultaneously trains on data from multiple viewing durations. Our Multi-Duration Saliency Excited Model (MD-SEM) achieves competitive performance on the LSUN 2017 Challenge with 57% fewer parameters than comparable architectures. It is the first model that produces heatmaps at multiple viewing durations, enabling applications where multi-duration saliency can be used to prioritize visual content to keep, transmit, and render.",
"abstracts": [
{
"abstractType": "Regular",
"content": "What jumps out in a single glance of an image is different than what you might notice after closer inspection. Yet conventional models of visual saliency produce predictions at an arbitrary, fixed viewing duration, offering a limited view of the rich interactions between image content and gaze location. In this paper we propose to capture gaze as a series of snapshots, by generating population-level saliency heatmaps for multiple viewing durations. We collect the CodeCharts1K dataset, which contains multiple distinct heatmaps per image corresponding to 0.5, 3, and 5 seconds of free-viewing. We develop an LSTM-based model of saliency that simultaneously trains on data from multiple viewing durations. Our Multi-Duration Saliency Excited Model (MD-SEM) achieves competitive performance on the LSUN 2017 Challenge with 57% fewer parameters than comparable architectures. It is the first model that produces heatmaps at multiple viewing durations, enabling applications where multi-duration saliency can be used to prioritize visual content to keep, transmit, and render.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "What jumps out in a single glance of an image is different than what you might notice after closer inspection. Yet conventional models of visual saliency produce predictions at an arbitrary, fixed viewing duration, offering a limited view of the rich interactions between image content and gaze location. In this paper we propose to capture gaze as a series of snapshots, by generating population-level saliency heatmaps for multiple viewing durations. We collect the CodeCharts1K dataset, which contains multiple distinct heatmaps per image corresponding to 0.5, 3, and 5 seconds of free-viewing. We develop an LSTM-based model of saliency that simultaneously trains on data from multiple viewing durations. Our Multi-Duration Saliency Excited Model (MD-SEM) achieves competitive performance on the LSUN 2017 Challenge with 57% fewer parameters than comparable architectures. It is the first model that produces heatmaps at multiple viewing durations, enabling applications where multi-duration saliency can be used to prioritize visual content to keep, transmit, and render.",
"fno": "716800e472",
"keywords": [
"Learning Artificial Intelligence",
"Object Detection",
"Rendering Computer Graphics",
"Robot Vision",
"Visual Saliency Produce Predictions",
"Arbitrary Viewing Duration",
"Fixed Viewing Duration",
"Image Content",
"Population Level Saliency Heatmaps",
"Multiple Viewing Durations",
"Multiple Distinct Heatmaps",
"Free Viewing",
"LSTM Based Model",
"Multiduration Saliency Excited Model",
"Modeling Multiduration Saliency Excited Model",
"MD SEM",
"LSUN 2017 Challenge",
"Visual Content",
"Code Charts 1 K Dataset",
"Face",
"Heating Systems",
"Predictive Models",
"Data Models",
"Computational Modeling",
"Visualization",
"Task Analysis"
],
"authors": [
{
"affiliation": "Massachusetts Institute of Technology",
"fullName": "Camilo Fosco",
"givenName": "Camilo",
"surname": "Fosco",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Massachusetts Institute of Technology",
"fullName": "Anelise Newman",
"givenName": "Anelise",
"surname": "Newman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Harvard University",
"fullName": "Pat Sukhum",
"givenName": "Pat",
"surname": "Sukhum",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Harvard University",
"fullName": "Yun Bin Zhang",
"givenName": "Yun Bin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "City University of Hong Kong",
"fullName": "Nanxuan Zhao",
"givenName": "Nanxuan",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Massachusetts Institute of Technology",
"fullName": "Aude Oliva",
"givenName": "Aude",
"surname": "Oliva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Zoya Bylinskii",
"givenName": "Zoya",
"surname": "Bylinskii",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4472-4481",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800e462",
"articleId": "1m3nBH0CDpC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800e482",
"articleId": "1m3nknHSDUQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457d135",
"title": "Top-Down Visual Saliency Guided by Captions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d135/12OmNvSbBOR",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/5/01327110",
"title": "Modeling syllable duration in Indian languages using neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01327110/12OmNxX3uQa",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/5",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b529",
"title": "Saliency Prediction for Mobile User Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b529/12OmNzdGnsJ",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08269807",
"title": "Saliency in VR: How Do People Explore Virtual Environments?",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08269807/13rRUxDqS8o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/03/08315047",
"title": "What Do Different Evaluation Metrics Tell Us About Saliency Models?",
"doi": null,
"abstractUrl": "/journal/tp/2019/03/08315047/17D45Vw15wU",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b420",
"title": "Cube Padding for Weakly-Supervised Saliency Prediction in 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b420/17D45WB0qcO",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/01/08744328",
"title": "Revisiting Video Saliency Prediction in the Deep Learning Era",
"doi": null,
"abstractUrl": "/journal/tp/2021/01/08744328/1bYPA2ONQT6",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i836",
"title": "There and Back Again: Revisiting Backpropagation Saliency Methods",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i836/1m3nKiB5Z8Q",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2130",
"title": "Inferring Attention Shift Ranks of Objects for Image Saliency",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2130/1m3nkkxsdUc",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412308",
"title": "Classifying Eye-Tracking Data Using Saliency Maps",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412308/1tmiNBHZZDO",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnWQmeJsZi",
"doi": "10.1109/VRW52623.2021.00236",
"title": "[DC] Eye Fixation Forecasting in Task-Oriented Virtual Reality",
"normalizedTitle": "[DC] Eye Fixation Forecasting in Task-Oriented Virtual Reality",
"abstract": "In immersive virtual reality (VR), users' visual attention is crucial for many important applications, including VR content design, gaze-based interaction, and gaze-contingent rendering. Especially, information on users' future eye fixations is key for intelligent user interfaces and has significant relevance for many areas, such as visual attention enhancement, dynamic event triggering, and human-computer interaction. However, previous works typically focused on free-viewing conditions and paid less attention to task-oriented attention. This paper aims at forecasting users' eye fixations in task-oriented virtual reality. To this end, a VR eye tracking dataset that corresponds to different users performing a visual search task in immersive virtual environments is built. A comprehensive analysis of users' eye fixations is performed based on the collected data. The analysis reveals that eye fixations are correlated with users' historical gaze positions, task-related objects, saliency information of the VR content, and head rotation velocities. Based on this analysis, a novel learning-based model is proposed to forecast users' eye fixations in the near future in immersive virtual environments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In immersive virtual reality (VR), users' visual attention is crucial for many important applications, including VR content design, gaze-based interaction, and gaze-contingent rendering. Especially, information on users' future eye fixations is key for intelligent user interfaces and has significant relevance for many areas, such as visual attention enhancement, dynamic event triggering, and human-computer interaction. However, previous works typically focused on free-viewing conditions and paid less attention to task-oriented attention. This paper aims at forecasting users' eye fixations in task-oriented virtual reality. To this end, a VR eye tracking dataset that corresponds to different users performing a visual search task in immersive virtual environments is built. A comprehensive analysis of users' eye fixations is performed based on the collected data. The analysis reveals that eye fixations are correlated with users' historical gaze positions, task-related objects, saliency information of the VR content, and head rotation velocities. Based on this analysis, a novel learning-based model is proposed to forecast users' eye fixations in the near future in immersive virtual environments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In immersive virtual reality (VR), users' visual attention is crucial for many important applications, including VR content design, gaze-based interaction, and gaze-contingent rendering. Especially, information on users' future eye fixations is key for intelligent user interfaces and has significant relevance for many areas, such as visual attention enhancement, dynamic event triggering, and human-computer interaction. However, previous works typically focused on free-viewing conditions and paid less attention to task-oriented attention. This paper aims at forecasting users' eye fixations in task-oriented virtual reality. To this end, a VR eye tracking dataset that corresponds to different users performing a visual search task in immersive virtual environments is built. A comprehensive analysis of users' eye fixations is performed based on the collected data. The analysis reveals that eye fixations are correlated with users' historical gaze positions, task-related objects, saliency information of the VR content, and head rotation velocities. Based on this analysis, a novel learning-based model is proposed to forecast users' eye fixations in the near future in immersive virtual environments.",
"fno": "405700a707",
"keywords": [
"Learning Artificial Intelligence",
"Rendering Computer Graphics",
"User Interfaces",
"Virtual Reality",
"Visual Perception",
"Immersive Virtual Reality",
"VR Content Design",
"Gaze Based Interaction",
"Gaze Contingent Rendering",
"Intelligent User Interfaces",
"Visual Attention Enhancement",
"Human Computer Interaction",
"Task Oriented Attention",
"Forecasting Users",
"Task Oriented Virtual Reality",
"VR Eye Tracking Dataset",
"Visual Search Task",
"Immersive Virtual Environments",
"Eye Fixations",
"Task Related Objects",
"DC Eye Fixation Forecasting",
"Visualization",
"Solid Modeling",
"Three Dimensional Displays",
"Conferences",
"Virtual Environments",
"Gaze Tracking",
"Predictive Models",
"Fixation Forecasting X 2014 Visual Attention X 2014 Visual Search X 2014 Eye Tracking Deep Learning X 2014 Convolutional Neural Network X 2014 Virtual Reality"
],
"authors": [
{
"affiliation": "Peking University",
"fullName": "Zhiming Hu",
"givenName": "Zhiming",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "707-708",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a705",
"articleId": "1tnXg9U1MXu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a709",
"articleId": "1tnXkutoPi8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2010/06/ttg2010060953",
"title": "eSeeTrack—Visualizing Sequential Fixation Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010060953/13rRUwInvsJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a493",
"title": "Eye Tracking-based LSTM for Locomotion Prediction in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a493/1CJcrKWnUtO",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a170",
"title": "Development and evaluation of car training system using VR and eye tracking technology",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a170/1GU75yVJubS",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a530",
"title": "The Evaluation of Gait-Free Locomotion Methods with Eye Movement in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a530/1J7WtHqguHu",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a422",
"title": "Behavior Analysis of Indoor Escape Route-Finding Based on Head-Mounted VR and Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a422/1ehBGoaPHhK",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089637",
"title": "Eye-Gaze Activity in Crowds: Impact of Virtual Reality and Density",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089637/1jIx9WIWd5C",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089578",
"title": "Exploring Eye Gaze Visualization Techniques for Identifying Distracted Students in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089578/1jIxfimnIaY",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090559",
"title": "A Methodology of Eye Gazing Attention Determination for VR Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090559/1jIxoACmybu",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382883",
"title": "FixationNet: Forecasting Eye Fixations in Task-Oriented Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382883/1saZvDJS360",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBRbkpf",
"doi": "10.1109/VR.2016.7504735",
"title": "FaceBo: Real-time face and body tracking for faithful avatar synthesis",
"normalizedTitle": "FaceBo: Real-time face and body tracking for faithful avatar synthesis",
"abstract": "This paper introduces a low-cost framework capable of combining both real-time markerless face and body tracking for faithful avatar embodiment in Virtual Reality (VR). We discuss suitable hardware and software solutions and present a first prototype. This work lays the technological basis for further research on the importance of the appearance and behavioral realism of avatars, e.g., for the illusion of virtual body ownership, for social interactions in VR, as well as for VR entertainment applications (immersive games or movies).",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a low-cost framework capable of combining both real-time markerless face and body tracking for faithful avatar embodiment in Virtual Reality (VR). We discuss suitable hardware and software solutions and present a first prototype. This work lays the technological basis for further research on the importance of the appearance and behavioral realism of avatars, e.g., for the illusion of virtual body ownership, for social interactions in VR, as well as for VR entertainment applications (immersive games or movies).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a low-cost framework capable of combining both real-time markerless face and body tracking for faithful avatar embodiment in Virtual Reality (VR). We discuss suitable hardware and software solutions and present a first prototype. This work lays the technological basis for further research on the importance of the appearance and behavioral realism of avatars, e.g., for the illusion of virtual body ownership, for social interactions in VR, as well as for VR entertainment applications (immersive games or movies).",
"fno": "07504735",
"keywords": [
"Avatars",
"Software",
"Face",
"Tracking",
"Real Time Systems",
"Prototypes",
"H 5 1 Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "University of Würzburg",
"fullName": "Jean-Luc Lugrin",
"givenName": "Jean-Luc",
"surname": "Lugrin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg",
"fullName": "David Zilch",
"givenName": "David",
"surname": "Zilch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Wurzburg, University of Cologne",
"fullName": "Daniel Roth",
"givenName": "Daniel",
"surname": "Roth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Michigan State University, University of Cologne",
"fullName": "Gary Bente",
"givenName": "Gary",
"surname": "Bente",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg",
"fullName": "Marc Erich Latoschik",
"givenName": "Marc Erich",
"surname": "Latoschik",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "225-226",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504734",
"articleId": "12OmNwnYG0I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504736",
"articleId": "12OmNC4wtFQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223379",
"title": "Avatar anthropomorphism and illusion of body ownership in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223379/12OmNAWpyrk",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892278",
"title": "Bodiless embodiment: A descriptive survey of avatar bodily coherence in first-wave consumer VR applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892278/12OmNvnwVj4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549373",
"title": "Integrating head and full-body tracking for embodiment in virtual characters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549373/12OmNx0RIVC",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446229",
"title": "Any “Body” There? Avatar Visibility Effects in a Virtual Reality Game",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446229/13bd1fHrlRx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040591",
"title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/10/08695841",
"title": "A Survey of Full-Body Motion Reconstruction in Immersive Virtual Reality Applications",
"doi": null,
"abstractUrl": "/journal/tg/2020/10/08695841/19sOOGyyt56",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a350",
"title": "Exploring Presence, Avatar Embodiment, and Body Perception with a Holographic Augmented Reality Mirror",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a350/1CJcn3q3J5K",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797884",
"title": "Distributed, Collaborative Virtual Reality Application for Product Development with Simple Avatar Calibration Method",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797884/1cJ0TJmlU9q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090630",
"title": "Embodied Realistic Avatar System with Body Motions and Facial Expressions for Communication in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090630/1jIxtbZL30Y",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1grPb25cymQ",
"title": "2019 IEEE 19th International Conference on Bioinformatics and Bioengineering (BIBE)",
"acronym": "bibe",
"groupId": "1000075",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1grPfjAXxOo",
"doi": "10.1109/BIBE.2019.00117",
"title": "Meta-Learning for Avatar Kinematics Reconstruction in Virtual Reality Rehabilitation",
"normalizedTitle": "Meta-Learning for Avatar Kinematics Reconstruction in Virtual Reality Rehabilitation",
"abstract": "Virtual Reality (VR) sensorimotor rehabilitation is still in infancy but will soon require avatars, digital alter-egos of patients' physical selves. Such embodied interfaces could stimulate patients' perception in a rich and highly customized environment, where sensorimotor deficits, such as in Chemotherapy-Induced Peripheral Neuropathy, could be corrected. In such scenarios, motion prediction is a key ingredient for realistic immersion. Yet, such a task lives under hard processing latency constraints and the inherent variability of human motion. We propose a neural network meta-learning system exploiting the underlying correlations in body kinematics with potential to provide, within latency guarantees, personalized VR rehabilitation. The unsupervised meta-learner is able to extract underlying statistics of the motion data by exploiting data regularities in order to describe the underlying manifold, or structure, of motion under sensorimotor deficits. We demonstrate, through preliminary experiments the potential of such a learning system for adaptive kinematics estimation in personalized rehabilitation VR avatars.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) sensorimotor rehabilitation is still in infancy but will soon require avatars, digital alter-egos of patients' physical selves. Such embodied interfaces could stimulate patients' perception in a rich and highly customized environment, where sensorimotor deficits, such as in Chemotherapy-Induced Peripheral Neuropathy, could be corrected. In such scenarios, motion prediction is a key ingredient for realistic immersion. Yet, such a task lives under hard processing latency constraints and the inherent variability of human motion. We propose a neural network meta-learning system exploiting the underlying correlations in body kinematics with potential to provide, within latency guarantees, personalized VR rehabilitation. The unsupervised meta-learner is able to extract underlying statistics of the motion data by exploiting data regularities in order to describe the underlying manifold, or structure, of motion under sensorimotor deficits. We demonstrate, through preliminary experiments the potential of such a learning system for adaptive kinematics estimation in personalized rehabilitation VR avatars.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) sensorimotor rehabilitation is still in infancy but will soon require avatars, digital alter-egos of patients' physical selves. Such embodied interfaces could stimulate patients' perception in a rich and highly customized environment, where sensorimotor deficits, such as in Chemotherapy-Induced Peripheral Neuropathy, could be corrected. In such scenarios, motion prediction is a key ingredient for realistic immersion. Yet, such a task lives under hard processing latency constraints and the inherent variability of human motion. We propose a neural network meta-learning system exploiting the underlying correlations in body kinematics with potential to provide, within latency guarantees, personalized VR rehabilitation. The unsupervised meta-learner is able to extract underlying statistics of the motion data by exploiting data regularities in order to describe the underlying manifold, or structure, of motion under sensorimotor deficits. We demonstrate, through preliminary experiments the potential of such a learning system for adaptive kinematics estimation in personalized rehabilitation VR avatars.",
"fno": "461700a617",
"keywords": [
"Avatars",
"Gait Analysis",
"Image Reconstruction",
"Kinematics",
"Learning Artificial Intelligence",
"Medical Computing",
"Medical Image Processing",
"Neurophysiology",
"Patient Rehabilitation",
"Patient Treatment",
"Virtual Reality Sensorimotor Rehabilitation",
"Digital Alter Egos",
"Patient Perception",
"Motion Prediction",
"Chemotherapy Induced Peripheral Neuropathy",
"Avatar Kinematics Reconstruction",
"Personalized Rehabilitation VR Avatars",
"Adaptive Kinematics Estimation",
"Unsupervised Meta Learner",
"Personalized VR Rehabilitation",
"Body Kinematics",
"Neural Network Meta Learning System",
"Human Motion",
"Inherent Variability",
"Hard Processing Latency Constraints",
"Correlation",
"Avatars",
"Neural Networks",
"Kinematics",
"Oncology",
"Motion Measurement",
"Task Analysis",
"Neural Networks",
"Virtual Reality",
"Inverse Kinematics",
"Meta Learning",
"Rehabilitation",
"Chemotherapy Induced Peripheral Neuropathy"
],
"authors": [
{
"affiliation": "Audi Konfuzius Institut Ingolstadt / Technical University of Ingolstadt",
"fullName": "Cristian Axenie",
"givenName": "Cristian",
"surname": "Axenie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Hochschule Ingolstadt",
"fullName": "Armin Becher",
"givenName": "Armin",
"surname": "Becher",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interdisciplinary Breast Center, Helios Clinic Munich West",
"fullName": "Daria Kurz",
"givenName": "Daria",
"surname": "Kurz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Hochschule Ingolstadt",
"fullName": "Thomas Grauschopf",
"givenName": "Thomas",
"surname": "Grauschopf",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibe",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "617-624",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4617-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "461700a609",
"articleId": "1grPhXkXcRO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "461700a625",
"articleId": "1grPfDOaz5e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciicii/2017/2434/0/2434a177",
"title": "Kinematics Analysis and Trajectory Planning of Upper Limb Rehabilitation Robot",
"doi": null,
"abstractUrl": "/proceedings-article/iciicii/2017/2434a177/12OmNA0MZ3G",
"parentPublication": {
"id": "proceedings/iciicii/2017/2434/0",
"title": "2017 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571105",
"title": "Investigating the Trend of Virtual Reality-Based Stroke Rehabilitation Systems",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571105/12OmNCvLXZ2",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926560",
"title": "Gamification of Hand Rehabilitation Process Using Virtual Reality Tools: Using Leap Motion for Hand Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926560/12OmNqGiu27",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2900/0/29000798",
"title": "A Clinical Virtual Reality Rehabilitation System for Phobia Treatment",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/29000798/12OmNx9nGE0",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/02/06840898",
"title": "Guest Editorial: Special Issue on Haptics in Rehabilitation and Neural Engineering",
"doi": null,
"abstractUrl": "/journal/th/2014/02/06840898/13rRUxBrGha",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699215",
"title": "VIRTOOAIR: Virtual Reality TOOlbox for Avatar Intelligent Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699215/19F1Ug56qB2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a123",
"title": "Inverse kinematics for full-body self representation in VR-based cognitive rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a123/1A3j8qqoU8g",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797884",
"title": "Distributed, Collaborative Virtual Reality Application for Product Development with Simple Avatar Calibration Method",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797884/1cJ0TJmlU9q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797872",
"title": "[DC] VR Simulation as a Motivator in Gait Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797872/1cJ15Qs6tnG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a274",
"title": "Inverse Kinematics and Temporal Convolutional Networks for Sequential Pose Analysis in VR",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a274/1qpzAz62YQ8",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1wLcd132uKA",
"title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)",
"acronym": "compsac",
"groupId": "1000143",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1wLcE9cNine",
"doi": "10.1109/COMPSAC51774.2021.00141",
"title": "How Do Avatar Appearances Affect Communication from Others?",
"normalizedTitle": "How Do Avatar Appearances Affect Communication from Others?",
"abstract": "Many systems and services for VR spaces use avatars to represent the appearances of each user. In those systems and services, the users communicate with each other via their avatars. Currently, various techniques and researches have been studied for avatar appearance. However, the existing techniques and studies mainly regard avatars as the self-expression of each user and do not mention the influences on communication from others. In communication content, avatars are one of self-expression of each user and one of the communication tools. Therefore, this paper describes an investigation of how avatar appearances affect communication from others. The result from the research showed that the degree of deformation of avatars affects communication from others in various situations such as informal and formal situations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many systems and services for VR spaces use avatars to represent the appearances of each user. In those systems and services, the users communicate with each other via their avatars. Currently, various techniques and researches have been studied for avatar appearance. However, the existing techniques and studies mainly regard avatars as the self-expression of each user and do not mention the influences on communication from others. In communication content, avatars are one of self-expression of each user and one of the communication tools. Therefore, this paper describes an investigation of how avatar appearances affect communication from others. The result from the research showed that the degree of deformation of avatars affects communication from others in various situations such as informal and formal situations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many systems and services for VR spaces use avatars to represent the appearances of each user. In those systems and services, the users communicate with each other via their avatars. Currently, various techniques and researches have been studied for avatar appearance. However, the existing techniques and studies mainly regard avatars as the self-expression of each user and do not mention the influences on communication from others. In communication content, avatars are one of self-expression of each user and one of the communication tools. Therefore, this paper describes an investigation of how avatar appearances affect communication from others. The result from the research showed that the degree of deformation of avatars affects communication from others in various situations such as informal and formal situations.",
"fno": "246300b036",
"keywords": [
"Avatars",
"Virtual Reality",
"Avatar Appearance",
"Communication Content",
"Communication Tools",
"VR Spaces",
"Formal Situations",
"Informal Situations",
"Avatars",
"Conferences",
"Tools",
"Software",
"Strain",
"VR",
"Virtual Communication",
"Body Proportions",
"Degree Of Deformation"
],
"authors": [
{
"affiliation": "University of Fukui,Graduate School of Engineering,Fukui,Japan",
"fullName": "Yasuaki Kobayashi",
"givenName": "Yasuaki",
"surname": "Kobayashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Fukui,Graduate School of Engineering,Fukui,Japan",
"fullName": "Tomoya Kawakami",
"givenName": "Tomoya",
"surname": "Kawakami",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"fullName": "Satoru Matsumoto",
"givenName": "Satoru",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"fullName": "Tomoki Yoshihisa",
"givenName": "Tomoki",
"surname": "Yoshihisa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"fullName": "Yuuichi Teranishi",
"givenName": "Yuuichi",
"surname": "Teranishi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"fullName": "Shinji Shimojo",
"givenName": "Shinji",
"surname": "Shimojo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "compsac",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1036-1039",
"year": "2021",
"issn": "0730-3157",
"isbn": "978-1-6654-2463-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "246300b032",
"articleId": "1wLcvEw5rFe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "246300b040",
"articleId": "1wLcKp4NK92",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccps/2013/1996/0/06604013",
"title": "Architecture of a cyberphysical avatar",
"doi": null,
"abstractUrl": "/proceedings-article/iccps/2013/06604013/12OmNA0vo1s",
"parentPublication": {
"id": "proceedings/iccps/2013/1996/0",
"title": "2013 ACM/IEEE International Conference on Cyber-Physical Systems (ICCPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a325",
"title": "Instant Messenger with Personalized 3D Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a325/12OmNAkEU6d",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2011/468/0/06142700",
"title": "Does an avatar motivate?",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2011/06142700/12OmNCcKQGG",
"parentPublication": {
"id": "proceedings/fie/2011/468/0",
"title": "2011 Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892275",
"title": "Socially immersive avatar-based communication",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892275/12OmNwEJ0VR",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2017/6716/0/07893357",
"title": "Influence of avatar appearance on presence in social VR",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2017/07893357/12OmNwwuDSr",
"parentPublication": {
"id": "proceedings/3dui/2017/6716/0",
"title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a724",
"title": "Multimodal Affect Recognition in Virtual Worlds: Avatars Mirroring User's Affect",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a724/12OmNzahbSm",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798318",
"title": "Evaluating Teacher Avatar Appearances in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798318/1cJ0P8vBhhm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08952604",
"title": "Effect of Avatar Appearance on Detection Thresholds for Remapped Hand Movements",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08952604/1gqqhpSZ19S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": [
{
"id": "1xwipeRBqJa",
"videoExt": "mp4",
"videoType": {
"featured": false,
"recommended": false,
"sponsored": false,
"__typename": "VideoTypesType"
},
"article": {
"id": "1wLcE9cNine",
"fno": "246300b036",
"issueNum": null,
"pubType": "proceedings",
"volume": "0",
"year": "2021",
"idPrefix": "compsac",
"doi": "10.1109/COMPSAC51774.2021.00141",
"title": "How Do Avatar Appearances Affect Communication from Others?",
"__typename": "ArticleType"
},
"channel": {
"id": "1xvX5qkErBu",
"title": "COMPSAC 2021",
"status": "1",
"featured": false,
"defaultVideoId": "1xvX5cz6Dtu",
"category": {
"id": "1xvX5lT1WiQ",
"title": "Proceeding",
"type": "proceeding",
"__typename": "VideoCategoryType"
},
"__typename": "VideoChannelType"
},
"year": "2021",
"title": "How Do Avatar Appearances Affect Communication from Others?",
"description": "Many systems and services for VR spaces use avatars to represent the appearances of each user. In those systems and services, the users communicate with each other via their avatars. Currently, various techniques and researches have been studied for avatar appearance. However, the existing techniques and studies mainly regard avatars as the self-expression of each user and do not mention the influences on communication from others. In communication content, avatars are one of self-expression of each user and one of the communication tools. Therefore, this paper describes an investigation of how avatar appearances affect communication from others. The result from the research showed that the degree of deformation of avatars affects communication from others in various situations such as informal and formal situations.",
"keywords": [
{
"id": "1xvX5I2jhAY",
"title": "Conferences",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xvX5PnBSes",
"title": "Software",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xvX6nWYXpC",
"title": "Tools",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xw9AFOOHTi",
"title": "VR",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xwipn95N1S",
"title": "Avatars",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xwipptzCeI",
"title": "Strain",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xwiprHOgA8",
"title": "virtual communication",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xwiptTM7T2",
"title": "body proportions",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xwipwefX5S",
"title": "degree of deformation",
"status": "1",
"__typename": "VideoKeywordsType"
}
],
"speakers": [
{
"firstName": "Yasuaki",
"lastName": "Kobayashi",
"affiliation": "University of Fukui,Graduate School of Engineering,Fukui,Japan",
"__typename": "SpeakerType"
},
{
"firstName": "Tomoya",
"lastName": "Kawakami",
"affiliation": "University of Fukui,Graduate School of Engineering,Fukui,Japan",
"__typename": "SpeakerType"
},
{
"firstName": "Satoru",
"lastName": "Matsumoto",
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"__typename": "SpeakerType"
},
{
"firstName": "Tomoki",
"lastName": "Yoshihisa",
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"__typename": "SpeakerType"
},
{
"firstName": "Yuuichi",
"lastName": "Teranishi",
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"__typename": "SpeakerType"
},
{
"firstName": "Shinji",
"lastName": "Shimojo",
"affiliation": "Osaka University,Cybermedia Center,Osaka,Japan",
"__typename": "SpeakerType"
}
],
"created": "2021-10-08T00:00:00",
"updated": "2021-10-08T00:00:00",
"imageThumbnailUrl": "thumbnails/1xwipeRBqJa.jpeg",
"runningTime": "00:08:13",
"aspectRatio": "16:9",
"metrics": {
"views": "0",
"likes": "0",
"__typename": "VideoMetricsType"
},
"notShowInVideoLib": false,
"__typename": "VideoType"
}
]
} |
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAYXWzp",
"doi": "10.1109/ISMAR.2014.6948403",
"title": "Thermal touch: Thermography-enabled everywhere touch interfaces for mobile augmented reality applications",
"normalizedTitle": "Thermal touch: Thermography-enabled everywhere touch interfaces for mobile augmented reality applications",
"abstract": "We present an approach that makes any real object a true touch interface for mobile Augmented Reality applications. Using infrared thermography, we detect residual heat resulting from a warm fingertip touching the colder surface of an object. This approach can clearly distinguish if a surface has actually been touched, or if a finger only approached it without any physical contact, and hence significantly less heat transfer. Once a touch has been detected in the thermal image, we determine the corresponding 3D position on the touched object based on visual object tracking using a visible light camera. Finally the 3D position of the touch is used by human machine interfaces for Augmented Reality providing natural means to interact with real and virtual objects. The emergence of wearable computers and head-mounted displays desires for alternatives to a touch screen, which is the primary user interface in handheld Augmented Reality applications. Voice control and touchpads provide a useful alternative to interact with wearables for certain tasks, but particularly common interaction tasks in Augmented Reality require to accurately select or define 3D points on real surfaces. We propose to enable this kind of interaction by simply touching the respective surface with a fingertip. Based on tests with a variety of different materials and different users, we show that our method enables intuitive interaction for mobile Augmented Reality with most common objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an approach that makes any real object a true touch interface for mobile Augmented Reality applications. Using infrared thermography, we detect residual heat resulting from a warm fingertip touching the colder surface of an object. This approach can clearly distinguish if a surface has actually been touched, or if a finger only approached it without any physical contact, and hence significantly less heat transfer. Once a touch has been detected in the thermal image, we determine the corresponding 3D position on the touched object based on visual object tracking using a visible light camera. Finally the 3D position of the touch is used by human machine interfaces for Augmented Reality providing natural means to interact with real and virtual objects. The emergence of wearable computers and head-mounted displays desires for alternatives to a touch screen, which is the primary user interface in handheld Augmented Reality applications. Voice control and touchpads provide a useful alternative to interact with wearables for certain tasks, but particularly common interaction tasks in Augmented Reality require to accurately select or define 3D points on real surfaces. We propose to enable this kind of interaction by simply touching the respective surface with a fingertip. Based on tests with a variety of different materials and different users, we show that our method enables intuitive interaction for mobile Augmented Reality with most common objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an approach that makes any real object a true touch interface for mobile Augmented Reality applications. Using infrared thermography, we detect residual heat resulting from a warm fingertip touching the colder surface of an object. This approach can clearly distinguish if a surface has actually been touched, or if a finger only approached it without any physical contact, and hence significantly less heat transfer. Once a touch has been detected in the thermal image, we determine the corresponding 3D position on the touched object based on visual object tracking using a visible light camera. Finally the 3D position of the touch is used by human machine interfaces for Augmented Reality providing natural means to interact with real and virtual objects. The emergence of wearable computers and head-mounted displays desires for alternatives to a touch screen, which is the primary user interface in handheld Augmented Reality applications. Voice control and touchpads provide a useful alternative to interact with wearables for certain tasks, but particularly common interaction tasks in Augmented Reality require to accurately select or define 3D points on real surfaces. We propose to enable this kind of interaction by simply touching the respective surface with a fingertip. Based on tests with a variety of different materials and different users, we show that our method enables intuitive interaction for mobile Augmented Reality with most common objects.",
"fno": "06948403",
"keywords": [
"Cameras",
"Three Dimensional Displays",
"Temperature Measurement",
"Materials",
"Augmented Reality",
"User Interfaces",
"Heating",
"Artificial Augmented Virtual Realities Evaluation Methodology",
"H 5 2 User Interfaces Input Devices And Strategies Graphical User Interfaces",
"H 5 1 Multimedia Information Systems"
],
"authors": [
{
"affiliation": "Metaio GmbH",
"fullName": "Daniel Kurz",
"givenName": "Daniel",
"surname": "Kurz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "9-16",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948402",
"articleId": "12OmNySosKY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948404",
"articleId": "12OmNvnOwuE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671841",
"title": "Markerless 3D gesture-based interaction for handheld Augmented Reality interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671841/12OmNAIvcZU",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptic/2006/0226/0/01627071",
"title": "Material Property Recognition by Active Tapping for Fingertip Digitizing",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627071/12OmNAndinM",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550212",
"title": "Poster: Markerless fingertip-based 3D interaction for handheld augmented reality in a small workspace",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550212/12OmNBsue2b",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948450",
"title": "[Poster] Combining multi-touch and device movement in mobile augmented reality manipulations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948450/12OmNxWcHdT",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948460",
"title": "[Poster] A preliminary study on altering surface softness perception using augmented color and deformation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948460/12OmNyQph5r",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2011/2183/0/06162913",
"title": "Comparing spatial understanding between touch-based and AR-style interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06162913/12OmNyVes7B",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671793",
"title": "Passive Deformable Haptic glove to support 3D interactions in mobile augmented reality environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671793/12OmNz5JBPu",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948485",
"title": "[Demo] Thermal touch: Thermography-enabled everywhere touch interfaces for mobile augmented reality applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948485/12OmNzl3WSr",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446426",
"title": "Walk-Centric User Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446426/13bd1fHrlRZ",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdsba/2020/8164/0/816400a050",
"title": "TIP: Tangible Interactive Projector with Projection Touch Tracking and Spatial Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icdsba/2020/816400a050/1xeWoj58YyA",
"parentPublication": {
"id": "proceedings/icdsba/2020/8164/0",
"title": "2020 4th Annual International Conference on Data Science and Business Analytics (ICDSBA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwl8GHU",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAsk4A7",
"doi": "10.1109/3DUI.2013.6550188",
"title": "ForceExtension: Extending isotonic position-controlled multi-touch gestures with rate-controlled force sensing for 3D manipulation",
"normalizedTitle": "ForceExtension: Extending isotonic position-controlled multi-touch gestures with rate-controlled force sensing for 3D manipulation",
"abstract": "Recent advances in multi-touch technology have enabled pressure sensing of each touch point on a multi-touch touchpad in addition to position tracking. In this paper we propose two novel approaches for utilizing this extra dimension of input to extend the effect range of position controlled multi-touch gestures. Both ForceExtension approaches are only activated when the averaged force of all active fingers reaches a threshold. The first approach, context-force extension, tracks the most recent position-control movement as the context and combines it with the force input as an isometric rate-controlled extension. The second approach, shear-force extension, scales the micro displacement of the active fingers with the force input to simulate shear-force sensing as a viscoelastic rate-controlled extension. We collected feedback from several users who were asked to perform a 3D search task using variations of these interfaces. A single force sensing multi-touch touchpad was used to control the first-person camera during the search, and the multi-touch gestures to pan, rotate, and zoom the 3D camera were augmented through ForceExtension. Users preferred a medium gain position control combined with the context-force extension.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent advances in multi-touch technology have enabled pressure sensing of each touch point on a multi-touch touchpad in addition to position tracking. In this paper we propose two novel approaches for utilizing this extra dimension of input to extend the effect range of position controlled multi-touch gestures. Both ForceExtension approaches are only activated when the averaged force of all active fingers reaches a threshold. The first approach, context-force extension, tracks the most recent position-control movement as the context and combines it with the force input as an isometric rate-controlled extension. The second approach, shear-force extension, scales the micro displacement of the active fingers with the force input to simulate shear-force sensing as a viscoelastic rate-controlled extension. We collected feedback from several users who were asked to perform a 3D search task using variations of these interfaces. A single force sensing multi-touch touchpad was used to control the first-person camera during the search, and the multi-touch gestures to pan, rotate, and zoom the 3D camera were augmented through ForceExtension. Users preferred a medium gain position control combined with the context-force extension.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent advances in multi-touch technology have enabled pressure sensing of each touch point on a multi-touch touchpad in addition to position tracking. In this paper we propose two novel approaches for utilizing this extra dimension of input to extend the effect range of position controlled multi-touch gestures. Both ForceExtension approaches are only activated when the averaged force of all active fingers reaches a threshold. The first approach, context-force extension, tracks the most recent position-control movement as the context and combines it with the force input as an isometric rate-controlled extension. The second approach, shear-force extension, scales the micro displacement of the active fingers with the force input to simulate shear-force sensing as a viscoelastic rate-controlled extension. We collected feedback from several users who were asked to perform a 3D search task using variations of these interfaces. A single force sensing multi-touch touchpad was used to control the first-person camera during the search, and the multi-touch gestures to pan, rotate, and zoom the 3D camera were augmented through ForceExtension. Users preferred a medium gain position control combined with the context-force extension.",
"fno": "06550188",
"keywords": [
"Force",
"Position Control",
"Sensors",
"Thumb",
"Cameras",
"Aerospace Electronics",
"Hybrid Solution",
"Force Sensing Touchpad",
"Multi Touch",
"Position Control",
"Rate Control",
"Isotonic And Isometric"
],
"authors": [
{
"affiliation": "HIVE Lab., Worcester Polytech. Inst., Worcester, MA, USA",
"fullName": "Jia Wang",
"givenName": null,
"surname": "Jia Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "HIVE Lab., Worcester Polytech. Inst., Worcester, MA, USA",
"fullName": "Robert W. Lindeman",
"givenName": "Robert W.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-03-01T00:00:00",
"pubType": "proceedings",
"pages": "3-6",
"year": "2013",
"issn": null,
"isbn": "978-1-4673-6097-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06550186",
"articleId": "12OmNrYCXMQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06550189",
"articleId": "12OmNBoNroS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444652",
"title": "Virtual object manipulation system with substitutive display of tangential force and slip by control of vibrotactile phantom sensation",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444652/12OmNAFWOO7",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2012/4836/0/4836a056",
"title": "Enhancing Touch Screen Games Through a Cable-driven Force Feedback Device",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2012/4836a056/12OmNB836Kj",
"parentPublication": {
"id": "proceedings/icvrv/2012/4836/0",
"title": "2012 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948467",
"title": "[Poster] Touch gestures for improved 3D object manipulation in mobile augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948467/12OmNrkT7xo",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1989/1938/0/00099982",
"title": "Area touch sensor for dextrous manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00099982/12OmNvT2p1E",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/3/71083027",
"title": "Experiments in dexterous hybrid force and position control of a master/slave electrohydraulic manipulator",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71083027/12OmNzTH16o",
"parentPublication": {
"id": "proceedings/iros/1995/7108/3",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012301",
"title": "An experiment in hybrid position/force control of a six DOF revolute manipulator",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012301/12OmNzcxZmz",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446516",
"title": "Softness-Hardness and Stickiness Feedback Using Electrical Stimulation While Touching a Virtual Object",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446516/13bd1fWcuDz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/01/07502124",
"title": "Vibrotactile Sensitivity in Active Touch: Effect of Pressing Force",
"doi": null,
"abstractUrl": "/journal/th/2017/01/07502124/13rRUxjQyvy",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2018/3360/0/08482015",
"title": "Hybrid Force-Position Robot Control: An Artificial Neural Network Backstepping Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2018/08482015/14dcDXWbm0d",
"parentPublication": {
"id": "proceedings/icci*cc/2018/3360/0",
"title": "2018 IEEE 17th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09139211",
"title": "Conveying Emotions Through Device-Initiated Touch",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09139211/1ls8f939qW4",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCvLXZ7",
"doi": "10.1109/VR.2015.7223324",
"title": "3DTouch: A wearable 3D input device for 3D applications",
"normalizedTitle": "3DTouch: A wearable 3D input device for 3D applications",
"abstract": "3D applications appear in every corner of life in the current technology era. There is a need for an ubiquitous 3D input device that works with many different platforms, from head-mounted displays (HMDs) to mobile touch devices, 3DTVs, and even the Cave Automatic Virtual Environments. We present 3DTouch, a novel wearable 3D input device worn on the fingertip for 3D manipulation tasks. 3DTouch is designed to fill the missing gap of a 3D input device that is self-contained, mobile, and universally works across various 3D platforms. This paper presents a low-cost solution to designing and implementing such a device. Our approach relies on a relative positioning technique using an optical laser sensor and a 9-DOF inertial measurement unit. The device employs touch input for the benefits of passive haptic feedback, and movement stability. On the other hand, with touch interaction, 3DTouch is conceptually less fatiguing to use over many hours than 3D spatial input devices. We propose a set of 3D interaction techniques including selection, translation, and rotation using 3DTouch. An evaluation also demonstrates the device's tracking accuracy of 1.10 mm and 2.33 degrees for subtle touch interaction in 3D space. We envision that modular solutions like 3DTouch opens up a whole new design space for interaction techniques to further develop on. With 3DTouch, we attempt to bring 3D applications a step closer to users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D applications appear in every corner of life in the current technology era. There is a need for an ubiquitous 3D input device that works with many different platforms, from head-mounted displays (HMDs) to mobile touch devices, 3DTVs, and even the Cave Automatic Virtual Environments. We present 3DTouch, a novel wearable 3D input device worn on the fingertip for 3D manipulation tasks. 3DTouch is designed to fill the missing gap of a 3D input device that is self-contained, mobile, and universally works across various 3D platforms. This paper presents a low-cost solution to designing and implementing such a device. Our approach relies on a relative positioning technique using an optical laser sensor and a 9-DOF inertial measurement unit. The device employs touch input for the benefits of passive haptic feedback, and movement stability. On the other hand, with touch interaction, 3DTouch is conceptually less fatiguing to use over many hours than 3D spatial input devices. We propose a set of 3D interaction techniques including selection, translation, and rotation using 3DTouch. An evaluation also demonstrates the device's tracking accuracy of 1.10 mm and 2.33 degrees for subtle touch interaction in 3D space. We envision that modular solutions like 3DTouch opens up a whole new design space for interaction techniques to further develop on. With 3DTouch, we attempt to bring 3D applications a step closer to users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D applications appear in every corner of life in the current technology era. There is a need for an ubiquitous 3D input device that works with many different platforms, from head-mounted displays (HMDs) to mobile touch devices, 3DTVs, and even the Cave Automatic Virtual Environments. We present 3DTouch, a novel wearable 3D input device worn on the fingertip for 3D manipulation tasks. 3DTouch is designed to fill the missing gap of a 3D input device that is self-contained, mobile, and universally works across various 3D platforms. This paper presents a low-cost solution to designing and implementing such a device. Our approach relies on a relative positioning technique using an optical laser sensor and a 9-DOF inertial measurement unit. The device employs touch input for the benefits of passive haptic feedback, and movement stability. On the other hand, with touch interaction, 3DTouch is conceptually less fatiguing to use over many hours than 3D spatial input devices. We propose a set of 3D interaction techniques including selection, translation, and rotation using 3DTouch. An evaluation also demonstrates the device's tracking accuracy of 1.10 mm and 2.33 degrees for subtle touch interaction in 3D space. We envision that modular solutions like 3DTouch opens up a whole new design space for interaction techniques to further develop on. With 3DTouch, we attempt to bring 3D applications a step closer to users.",
"fno": "07223324",
"keywords": [
"Three Dimensional Displays",
"Mice",
"Tracking",
"Accuracy",
"Performance Evaluation",
"Optical Sensors",
"Shape",
"H 5 2 Information Interfaces And Presentation User Interfaces Graphical User Interfaces Input Devices And Strategies"
],
"authors": [
{
"affiliation": "Department of Computer Science, University of Wyoming",
"fullName": "Anh Nguyen",
"givenName": "Anh",
"surname": "Nguyen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Wyoming",
"fullName": "Amy Banic",
"givenName": "Amy",
"surname": "Banic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "55-61",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223323",
"articleId": "12OmNzZmZu4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223325",
"articleId": "12OmNrFTrba",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ncm/2009/3769/0/3769b607",
"title": "Design of the 3D Input Method Based on Touch Device for Mobile",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b607/12OmNAS9zR1",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2009/3965/0/04811236",
"title": "Demo: The Globefish: A novel input device for desktop-based 3D interaction",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2009/04811236/12OmNAoUT62",
"parentPublication": {
"id": "proceedings/3dui/2009/3965/0",
"title": "2009 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2011/4445/0/4445a242",
"title": "v-Glove: A 3D Virtual Touch Interface",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2011/4445a242/12OmNqBKTU6",
"parentPublication": {
"id": "proceedings/svr/2011/4445/0",
"title": "2011 XIII Symposium on Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476590",
"title": "Assessing the Effects of Orientation and Device on (Constrained) 3D Movement Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476590/12OmNrAv40k",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/2/05745056",
"title": "Stereo vision based 3D input device",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745056/12OmNrGsDkV",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/2",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798863",
"title": "Poster: Wearable input device for smart glasses based on a wristband-type motion-aware touch panel",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798863/12OmNy5hRlX",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798842",
"title": "HybridSpace: Integrating 3D freehand input and stereo viewing into traditional desktop applications",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798842/12OmNyUnEGq",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2012/4687/0/4687a502",
"title": "Pingu: A New Miniature Wearable Device for Ubiquitous Computing Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2012/4687a502/12OmNyo1nYs",
"parentPublication": {
"id": "proceedings/cisis/2012/4687/0",
"title": "2012 Sixth International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223451",
"title": "3DTouch: A wearable 3D input device for 3D applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223451/12OmNzE54CB",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480807",
"title": "Assessing the Effects of Orientation and Device on 3D Positioning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480807/12OmNzdoMwc",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBscCYB",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"acronym": "acit-csi",
"groupId": "1810566",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrGsDoo",
"doi": "10.1109/ACIT-CSII-BCD.2016.027",
"title": "Integration of Hand Gesture and Multi Touch Gesture with Glove Type Device",
"normalizedTitle": "Integration of Hand Gesture and Multi Touch Gesture with Glove Type Device",
"abstract": "In this study, we aimed to integrate multi-touch gesture with hand gesture and make new input method. Multi-touch gesture and hand gesture have been studied separately, however, these could be integrated, and it would be new interaction way for computer. We made a large scale multi-touch screen by FTIR method that uses infrared ray to detect the touched point, and a hand gesture recognition device with sensors, pressure sensor and bend sensor. In the end, we integrated the input information to interact with computer. User can input coordinate by touching, and simultaneously input action by performing hand gesture. For instance, user can select target by touching, and then make an action on target by clicking the pressure sensor or bending the bend sensor. We demonstrated the input method on GUI application.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this study, we aimed to integrate multi-touch gesture with hand gesture and make new input method. Multi-touch gesture and hand gesture have been studied separately, however, these could be integrated, and it would be new interaction way for computer. We made a large scale multi-touch screen by FTIR method that uses infrared ray to detect the touched point, and a hand gesture recognition device with sensors, pressure sensor and bend sensor. In the end, we integrated the input information to interact with computer. User can input coordinate by touching, and simultaneously input action by performing hand gesture. For instance, user can select target by touching, and then make an action on target by clicking the pressure sensor or bending the bend sensor. We demonstrated the input method on GUI application.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this study, we aimed to integrate multi-touch gesture with hand gesture and make new input method. Multi-touch gesture and hand gesture have been studied separately, however, these could be integrated, and it would be new interaction way for computer. We made a large scale multi-touch screen by FTIR method that uses infrared ray to detect the touched point, and a hand gesture recognition device with sensors, pressure sensor and bend sensor. In the end, we integrated the input information to interact with computer. User can input coordinate by touching, and simultaneously input action by performing hand gesture. For instance, user can select target by touching, and then make an action on target by clicking the pressure sensor or bending the bend sensor. We demonstrated the input method on GUI application.",
"fno": "07916962",
"keywords": [
"Data Gloves",
"Gesture Recognition",
"Graphical User Interfaces",
"Human Computer Interaction",
"Pressure Sensors",
"Touch Sensitive Screens",
"Multitouch Gesture",
"Glove Type Device",
"Large Scale Multitouch Screen",
"FTIR Method",
"Infrared Ray",
"Touched Point Detection",
"Hand Gesture Recognition Device",
"Pressure Sensor",
"Bend Sensor",
"Human Computer Interaction",
"Input Action",
"Target Selection",
"GUI Application",
"Thumb",
"Gesture Recognition",
"Cameras",
"Performance Evaluation",
"Pressure Sensors",
"Multi Touch",
"FTIR Display",
"Sensor",
"Hand Gesture"
],
"authors": [
{
"affiliation": null,
"fullName": "Hayato Takahashi",
"givenName": "Hayato",
"surname": "Takahashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuhki Kitazono",
"givenName": "Yuhki",
"surname": "Kitazono",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acit-csi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "81-86",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-4871-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07916961",
"articleId": "12OmNCfjeoZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07916963",
"articleId": "12OmNC0guAP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isms/2012/4668/0/4668a262",
"title": "Hidden Markov Model-Based Gesture Recognition with Overlapping Hand-Head/Hand-Hand Estimated Using Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2012/4668a262/12OmNA0vnNL",
"parentPublication": {
"id": "proceedings/isms/2012/4668/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726505",
"title": "Hand gesture to speech conversion using Matlab",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726505/12OmNqGA57o",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icct/2017/3030/0/08324015",
"title": "Comparative study for vision based and data based hand gesture recognition technique",
"doi": null,
"abstractUrl": "/proceedings-article/icct/2017/08324015/12OmNwwuE1a",
"parentPublication": {
"id": "proceedings/icct/2017/3030/0",
"title": "2017 International Conference on Intelligent Communication and Computational Techniques (ICCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2016/7258/0/07552959",
"title": "Hand gesture recognition based on canonical formed superpixel earth mover's distance",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552959/12OmNxE2mLg",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981b096",
"title": "A Real-Time Hand Gesture Recognition Approach Based on Motion Features of Feature Points",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981b096/12OmNxxvAHv",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hcs/2012/8879/0/07476466",
"title": "Touch-free technology",
"doi": null,
"abstractUrl": "/proceedings-article/hcs/2012/07476466/12OmNy49sOa",
"parentPublication": {
"id": "proceedings/hcs/2012/8879/0",
"title": "2012 IEEE Hot Chips 24 Symposium (HCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504716",
"title": "Monochrome glove: A robust real-time hand gesture recognition method by using a fabric glove with design of structured markers",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504716/12OmNz2kqqa",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tiiec/2013/5146/0/5146a328",
"title": "Hand Talk-Implementation of a Gesture Recognizing Glove",
"doi": null,
"abstractUrl": "/proceedings-article/tiiec/2013/5146a328/12OmNz5s0Os",
"parentPublication": {
"id": "proceedings/tiiec/2013/5146/0",
"title": "2013 Texas Instruments India Educators' Conference (TIIEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460035",
"title": "A sliding window approach to natural hand gesture recognition using a custom data glove",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460035/12OmNzxPTFx",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263123",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxWcHdT",
"doi": "10.1109/ISMAR.2014.6948450",
"title": "[Poster] Combining multi-touch and device movement in mobile augmented reality manipulations",
"normalizedTitle": "[Poster] Combining multi-touch and device movement in mobile augmented reality manipulations",
"abstract": "Three input modalities for manipulation techniques in Mobile Augmented Reality have been compared. The first one employs only multi-touch input. The second modality uses the movements of the device. Finally, the third one is a hybrid approach based on a combination of the two previous modalities. A user evaluation (N=12) on a 6 DOF docking task suggests that combining multi-touch input and device movement offers the best results in terms of task completion time and efficiency. Nonetheless, using solely the device is more intuitive and performs worse only in large rotations. Given that mobile devices are increasingly supporting movement tracking, the presented results encourage the addition of device movement as an input modality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Three input modalities for manipulation techniques in Mobile Augmented Reality have been compared. The first one employs only multi-touch input. The second modality uses the movements of the device. Finally, the third one is a hybrid approach based on a combination of the two previous modalities. A user evaluation (N=12) on a 6 DOF docking task suggests that combining multi-touch input and device movement offers the best results in terms of task completion time and efficiency. Nonetheless, using solely the device is more intuitive and performs worse only in large rotations. Given that mobile devices are increasingly supporting movement tracking, the presented results encourage the addition of device movement as an input modality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Three input modalities for manipulation techniques in Mobile Augmented Reality have been compared. The first one employs only multi-touch input. The second modality uses the movements of the device. Finally, the third one is a hybrid approach based on a combination of the two previous modalities. A user evaluation (N=12) on a 6 DOF docking task suggests that combining multi-touch input and device movement offers the best results in terms of task completion time and efficiency. Nonetheless, using solely the device is more intuitive and performs worse only in large rotations. Given that mobile devices are increasingly supporting movement tracking, the presented results encourage the addition of device movement as an input modality.",
"fno": "06948450",
"keywords": [
"Performance Evaluation",
"Thumb",
"Augmented Reality",
"Mobile Communication",
"Three Dimensional Displays",
"Grasping",
"Multi Touch",
"Mobile Augmented Reality",
"Manipulation"
],
"authors": [
{
"affiliation": "Public University of Navarre, Pamplona, Spain",
"fullName": "Asier Marzo",
"givenName": "Asier",
"surname": "Marzo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INRIA Bordeaux, Talence, France",
"fullName": "Martin Hachet",
"givenName": "Martin",
"surname": "Hachet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "281-282",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948449",
"articleId": "12OmNCbU2XO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948451",
"articleId": "12OmNqyDjpg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550188",
"title": "ForceExtension: Extending isotonic position-controlled multi-touch gestures with rate-controlled force sensing for 3D manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550188/12OmNAsk4A7",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2016/4155/0/4155a117",
"title": "Application Interface Structure Research Based on Touch Screen",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2016/4155a117/12OmNqGA5aG",
"parentPublication": {
"id": "proceedings/icris/2016/4155/0",
"title": "2016 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2016/4871/0/07916962",
"title": "Integration of Hand Gesture and Multi Touch Gesture with Glove Type Device",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2016/07916962/12OmNrGsDoo",
"parentPublication": {
"id": "proceedings/acit-csi/2016/4871/0",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948467",
"title": "[Poster] Touch gestures for improved 3D object manipulation in mobile augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948467/12OmNrkT7xo",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipccc/2014/7575/0/07017067",
"title": "Continuous user identification via touch and movement behavioral biometrics",
"doi": null,
"abstractUrl": "/proceedings-article/ipccc/2014/07017067/12OmNwIHot5",
"parentPublication": {
"id": "proceedings/ipccc/2014/7575/0",
"title": "2014 IEEE International Performance Computing and Communications Conference (IPCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295069",
"title": "Multi touch shape recognition for projected capacitive touch screen",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295069/12OmNyqzLZg",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671793",
"title": "Passive Deformable Haptic glove to support 3D interactions in mobile augmented reality environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671793/12OmNz5JBPu",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263123",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09139211",
"title": "Conveying Emotions Through Device-Initiated Touch",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09139211/1ls8f939qW4",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a526",
"title": "Text Selection in AR-HMD Using a Smartphone as an Input Device",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a526/1tnXhwEI6RO",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxFaLhU",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"acronym": "visapp",
"groupId": "1806906",
"volume": "3",
"displayVolume": "3",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz4SOzG",
"doi": "",
"title": "Virtual touch screen “VIRTOS” implementing virtual touch buttons and virtual sliders using a projector and camera",
"normalizedTitle": "Virtual touch screen “VIRTOS” implementing virtual touch buttons and virtual sliders using a projector and camera",
"abstract": "We propose a large interactive display with virtual touch buttons and sliders on a pale-colored flat wall. Our easy-to-install system consists of a front projector and a single commodity camera. A button touch is detected based on the area of the shadow cast by the user's hand; this shadow becomes very small when the button is touched. The shadow area is segmented by a brief change of the button to a different color when a large foreground (i.e., the hand and its shadow) covers the button region. Therefore, no time consuming operations, such as morphing or shape analysis, are required. Background subtraction is used to extract the foreground region. The reference image for the background is continuously adjusted to match the ambient light. Our virtual slider is based on this touch-button mechanism. When tested, our scheme proved robust to differences in illumination. The response time for touch detection was about 150 ms. Our virtual slider has a quick response and proved suitable as a controller for a Breakout-style game.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a large interactive display with virtual touch buttons and sliders on a pale-colored flat wall. Our easy-to-install system consists of a front projector and a single commodity camera. A button touch is detected based on the area of the shadow cast by the user's hand; this shadow becomes very small when the button is touched. The shadow area is segmented by a brief change of the button to a different color when a large foreground (i.e., the hand and its shadow) covers the button region. Therefore, no time consuming operations, such as morphing or shape analysis, are required. Background subtraction is used to extract the foreground region. The reference image for the background is continuously adjusted to match the ambient light. Our virtual slider is based on this touch-button mechanism. When tested, our scheme proved robust to differences in illumination. The response time for touch detection was about 150 ms. Our virtual slider has a quick response and proved suitable as a controller for a Breakout-style game.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a large interactive display with virtual touch buttons and sliders on a pale-colored flat wall. Our easy-to-install system consists of a front projector and a single commodity camera. A button touch is detected based on the area of the shadow cast by the user's hand; this shadow becomes very small when the button is touched. The shadow area is segmented by a brief change of the button to a different color when a large foreground (i.e., the hand and its shadow) covers the button region. Therefore, no time consuming operations, such as morphing or shape analysis, are required. Background subtraction is used to extract the foreground region. The reference image for the background is continuously adjusted to match the ambient light. Our virtual slider is based on this touch-button mechanism. When tested, our scheme proved robust to differences in illumination. The response time for touch detection was about 150 ms. Our virtual slider has a quick response and proved suitable as a controller for a Breakout-style game.",
"fno": "07295058",
"keywords": [
"Cameras",
"Image Color Analysis",
"Thumb",
"Shape",
"Sensors",
"Lighting",
"Virtual Touch Screen",
"Projector Camera Systems",
"Projector Based Display",
"Touch Detection"
],
"authors": [
{
"affiliation": "Department of Information Systems and Multimedia Design, Tokyo Denki University, Japan",
"fullName": "Takashi Homma",
"givenName": "Takashi",
"surname": "Homma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Information Systems and Multimedia Design, Tokyo Denki University, Japan",
"fullName": "Katsuto Nakajima",
"givenName": "Katsuto",
"surname": "Nakajima",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "visapp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-01-01T00:00:00",
"pubType": "proceedings",
"pages": "34-43",
"year": "2014",
"issn": null,
"isbn": "978-9-8975-8133-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07295057",
"articleId": "12OmNxveNEs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07295059",
"articleId": "12OmNy6HQQw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2013/5048/0/5048a786",
"title": "Reach Out and Touch Somebody's Virtual Hand: Affectively Connected through Mediated Touch",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a786/12OmNAq3hLn",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2012/4836/0/4836a056",
"title": "Enhancing Touch Screen Games Through a Cable-driven Force Feedback Device",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2012/4836a056/12OmNB836Kj",
"parentPublication": {
"id": "proceedings/icvrv/2012/4836/0",
"title": "2012 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2016/4155/0/4155a117",
"title": "Application Interface Structure Research Based on Touch Screen",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2016/4155a117/12OmNqGA5aG",
"parentPublication": {
"id": "proceedings/icris/2016/4155/0",
"title": "2016 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2016/0987/0/0987a516",
"title": "Design of Sketch-Based Image Search UI for Finger Gesture",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2016/0987a516/12OmNrFTr8u",
"parentPublication": {
"id": "proceedings/cisis/2016/0987/0",
"title": "2016 10th International Conference on Complex, Intelligent, and Software Intensive Systems (CISIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a359",
"title": "A Method of Touching and Moving Virtual Shadows with Real Shadows",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a359/12OmNwpGgGH",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiaiaai/2014/4174/0/06913282",
"title": "Effects of Visual Elements into the Touch Interaction during the Drag Operation",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913282/12OmNxymo9l",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643558",
"title": "Foreground and shadow occlusion handling for outdoor augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643558/12OmNyRPgDK",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295069",
"title": "Multi touch shape recognition for projected capacitive touch screen",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295069/12OmNyqzLZg",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tiiec/2013/5146/0/5146a285",
"title": "Precise Control of Objects with Touch Screen",
"doi": null,
"abstractUrl": "/proceedings-article/tiiec/2013/5146a285/12OmNzVoBKJ",
"parentPublication": {
"id": "proceedings/tiiec/2013/5146/0",
"title": "2013 Texas Instruments India Educators' Conference (TIIEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263123",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7Wqal3Fkc",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00144",
"title": "Lightweight Wearable AR System using Head-mounted Projector for Work Support",
"normalizedTitle": "Lightweight Wearable AR System using Head-mounted Projector for Work Support",
"abstract": "In this paper, we propose a lightweight wearable AR system for work support in which a user mounts a projector and a monocular camera on his or her head. The system can present information in front of the user's line of sight by superimposing images on a surface in real space. The system uses monocular SLAM to estimate the pose of the user's head, and changes images depending on the head movement, enabling the fixation of virtual objects. We developed a prototype system in which a small projector and a camera are fixed to glasses. The total weight of the prototype system is 84 g, which is small and lightweight enough for work support. We confirmed that a virtual object was fixed on a real surface using the prototype system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a lightweight wearable AR system for work support in which a user mounts a projector and a monocular camera on his or her head. The system can present information in front of the user's line of sight by superimposing images on a surface in real space. The system uses monocular SLAM to estimate the pose of the user's head, and changes images depending on the head movement, enabling the fixation of virtual objects. We developed a prototype system in which a small projector and a camera are fixed to glasses. The total weight of the prototype system is 84 g, which is small and lightweight enough for work support. We confirmed that a virtual object was fixed on a real surface using the prototype system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a lightweight wearable AR system for work support in which a user mounts a projector and a monocular camera on his or her head. The system can present information in front of the user's line of sight by superimposing images on a surface in real space. The system uses monocular SLAM to estimate the pose of the user's head, and changes images depending on the head movement, enabling the fixation of virtual objects. We developed a prototype system in which a small projector and a camera are fixed to glasses. The total weight of the prototype system is 84 g, which is small and lightweight enough for work support. We confirmed that a virtual object was fixed on a real surface using the prototype system.",
"fno": "536500a690",
"keywords": [
"Augmented Reality",
"Cameras",
"Helmet Mounted Displays",
"Image Sensors",
"Optical Projectors",
"Robot Vision",
"SLAM Robots",
"Camera",
"Head Movement",
"Head Mounted Projector",
"Lightweight Wearable AR System",
"Monocular Camera",
"Monocular SLAM",
"Prototype System",
"Virtual Object",
"Work Support",
"Head",
"Simultaneous Localization And Mapping",
"Prototypes",
"Glass",
"Cameras",
"Augmented Reality"
],
"authors": [
{
"affiliation": "Graduate School of Science and Engineering Saitama University",
"fullName": "Haruki Yuda",
"givenName": "Haruki",
"surname": "Yuda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Science and Engineering Saitama University",
"fullName": "Masamichi Limori",
"givenName": "Masamichi",
"surname": "Limori",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Science and Engineering Saitama University",
"fullName": "Taishi Iriyama",
"givenName": "Taishi",
"surname": "Iriyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Science and Engineering Saitama University",
"fullName": "Takashi Komuro",
"givenName": "Takashi",
"surname": "Komuro",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "690-691",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a686",
"articleId": "1J7WgWfFoOs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a692",
"articleId": "1J7WxP5sjmg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/music/2012/1956/0/4727a001",
"title": "A Mobile Head-Mounted Display for Action Sports",
"doi": null,
"abstractUrl": "/proceedings-article/music/2012/4727a001/12OmNrMZpuh",
"parentPublication": {
"id": "proceedings/music/2012/1956/0",
"title": "2012 Third FTRA International Conference on Mobile, Ubiquitous, and Intelligent Computing (MUSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836523",
"title": "Human Attention and fatigue for AR Head-Up Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2000/0478/0/04780233",
"title": "Visuo-Haptic Display Using Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwar/1999/0359/0/03590075",
"title": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590075/12OmNxTVU20",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2003/1882/0/18820110",
"title": "Combining Head-Mounted and Projector-Based Displays for Surgical Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2003/18820110/12OmNxX3uIq",
"parentPublication": {
"id": "proceedings/vr/2003/1882/0",
"title": "Proceedings IEEE Virtual Reality 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2009/3791/0/3791a043",
"title": "Enhancing Presence in Head-Mounted Display Environments by Visual Body Feedback Using Head-Mounted Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2009/3791a043/12OmNxveNRr",
"parentPublication": {
"id": "proceedings/cw/2009/3791/0",
"title": "2009 International Conference on CyberWorlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811063",
"title": "False Image Projector For Head Mounted Display Using Retrotransmissive Optical System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811063/12OmNxwENIr",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836518",
"title": "AR Tabletop Interface using a Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836518/12OmNyoiYW4",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a357",
"title": "Using Optical Head-Mounted Displays for instant, contextualized feedback",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a357/1FUUaDK7qGA",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a109",
"title": "Comparing Head and AR Glasses Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a109/1yeQMONGc9y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrNh0vm",
"title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on",
"acronym": "uic-atc",
"groupId": "1002946",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCmGNYK",
"doi": "10.1109/UIC-ATC.2012.95",
"title": "Enhancing Traditional Games with Augmented Reality Technologies",
"normalizedTitle": "Enhancing Traditional Games with Augmented Reality Technologies",
"abstract": "Digital technologies are useful to enhance existing traditional games to increase their pleasure. In many games, digital technologies can add special effects to excite a player emotionally. However, the technologies are also useful to help a player to learn complex rules in the games. Especially, traditional games like a poker are not easy to learn for beginners so many recent young people lose interests to play the games. In this paper, we present AR-Hold'em that is an enhanced Texas Hold'em poker game with augmented reality technologies. We also present some user studies showing the effectiveness of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Digital technologies are useful to enhance existing traditional games to increase their pleasure. In many games, digital technologies can add special effects to excite a player emotionally. However, the technologies are also useful to help a player to learn complex rules in the games. Especially, traditional games like a poker are not easy to learn for beginners so many recent young people lose interests to play the games. In this paper, we present AR-Hold'em that is an enhanced Texas Hold'em poker game with augmented reality technologies. We also present some user studies showing the effectiveness of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Digital technologies are useful to enhance existing traditional games to increase their pleasure. In many games, digital technologies can add special effects to excite a player emotionally. However, the technologies are also useful to help a player to learn complex rules in the games. Especially, traditional games like a poker are not easy to learn for beginners so many recent young people lose interests to play the games. In this paper, we present AR-Hold'em that is an enhanced Texas Hold'em poker game with augmented reality technologies. We also present some user studies showing the effectiveness of our approach.",
"fno": "4843a822",
"keywords": [
"Games",
"Footwear",
"Augmented Reality",
"Interviews",
"Materials",
"RFID Tags",
"Card Games",
"Augmented Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Hiroyuki Sakuma",
"givenName": "Hiroyuki",
"surname": "Sakuma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tetsuo Yamabe",
"givenName": "Tetsuo",
"surname": "Yamabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tatsuo Nakajima",
"givenName": "Tatsuo",
"surname": "Nakajima",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "uic-atc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-09-01T00:00:00",
"pubType": "proceedings",
"pages": "822-825",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-3084-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4843a816",
"articleId": "12OmNzICEQJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4843a826",
"articleId": "12OmNwDACqK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a275",
"title": "Towards Engaging Upper Extremity Motor Dysfunction Assessment Using Augmented Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a275/12OmNrHjqLk",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2015/8843/0/8843a099",
"title": "An Authoring Tool for Location-Based Mobile Games with Augmented Reality Features",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2015/8843a099/12OmNwcUjWC",
"parentPublication": {
"id": "proceedings/sbgames/2015/8843/0",
"title": "2015 14th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2010/6331/0/05460103",
"title": "Augmented Reality Games for Upper-Limb Stroke Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2010/05460103/12OmNwoxSda",
"parentPublication": {
"id": "proceedings/vs-games/2010/6331/0",
"title": "2010 2nd International Conference on Games and Virtual Worlds for Serious Applications (VS-GAMES 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643593",
"title": "Augmented reality for board games",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643593/12OmNxj23c6",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a227",
"title": "Augmented Reality and Serious Games: A Systematic Literature Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a227/12OmNyQGS43",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239198",
"title": "ARmy: A study of multi-user interaction in spatially augmented games",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239198/12OmNzaQozd",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/04/mcg2008040040",
"title": "Toward Next-Gen Mobile AR Games",
"doi": null,
"abstractUrl": "/magazine/cg/2008/04/mcg2008040040/13rRUxASujW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2011/938/0/05766947",
"title": "fAR-PLAY: A framework to develop Augmented/Alternate Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2011/05766947/17D45WnnFXN",
"parentPublication": {
"id": "proceedings/percomw/2011/938/0",
"title": "2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2011). PerCom-Workshops 2011: 2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a183",
"title": "LabXscape: A Prototype for Enhancing Player Experience in Cross-Reality Gameplay",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a183/1J7Wxf6naZa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2019/3798/0/379800b795",
"title": "Solving Six-Player Games via Online Situation Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2019/379800b795/1hrM1ySFU4g",
"parentPublication": {
"id": "proceedings/ictai/2019/3798/0",
"title": "2019 IEEE 31st International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwDAC3X",
"title": "2012 IEEE 15th International Conference on Computational Science and Engineering",
"acronym": "cse",
"groupId": "1002115",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrkjVeA",
"doi": "10.1109/ICCSE.2012.15",
"title": "DCA: Dynamic Challenging Level Adapter for Real-time Strategy Games",
"normalizedTitle": "DCA: Dynamic Challenging Level Adapter for Real-time Strategy Games",
"abstract": "Recently, Real-Time Strategy (RTS) games, such as Star Craft and Age of Empire, become more and more popular. Reasons of these RTS games attracting many game players are not only the fancy game presentation but also challenging game AI of computer opponents. In order to match game challenging level to different game players, these RTS games always provide several default difficulty levels for game players' choosing. However, settings of these difficulty levels cannot always accommodate challenging level requirements of different level game players. Therefore, this paper proposes a Dynamic Challenging Level Adapter (DCA) mechanism to automatically adapt computer opponent's behaviors for different game players. Each game player doesn't need to choose a difficulty level in advance and the game AI of computer opponent controlled by the DCA mechanism can dynamically adapt for meeting the challenging level of each game player. This paper proposes that the warrior is the most important element to dominate the result of each match in RTS games. Therefore, the main idea underlying the DCA mechanism is based on analyzing warrior capabilities between two game players and real-time adjusting action strategies to fight with its opponent. In order to test the effectiveness of the DCA mechanism, this study applies the most popular RTS game, Star Craft II (SC II), as the experimental game platform. In the experiments, this study uses default difficulty level AI of SC II game to emulate game players and makes these emulated game players to fight with the opponents controlled by the DCA mechanism. Additionally, a challenging rate (CR) formula is proposed as a strength evaluation between two opponents. From the experimental results, the difference of CR value of computer opponent improved by the DCA mechanism can be reduced by more than 80%. Additionally, the duration of a game match is usually double. 
Furthermore, the winner is always decided in the rear of each game match.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recently, Real-Time Strategy (RTS) games, such as Star Craft and Age of Empire, become more and more popular. Reasons of these RTS games attracting many game players are not only the fancy game presentation but also challenging game AI of computer opponents. In order to match game challenging level to different game players, these RTS games always provide several default difficulty levels for game players' choosing. However, settings of these difficulty levels cannot always accommodate challenging level requirements of different level game players. Therefore, this paper proposes a Dynamic Challenging Level Adapter (DCA) mechanism to automatically adapt computer opponent's behaviors for different game players. Each game player doesn't need to choose a difficulty level in advance and the game AI of computer opponent controlled by the DCA mechanism can dynamically adapt for meeting the challenging level of each game player. This paper proposes that the warrior is the most important element to dominate the result of each match in RTS games. Therefore, the main idea underlying the DCA mechanism is based on analyzing warrior capabilities between two game players and real-time adjusting action strategies to fight with its opponent. In order to test the effectiveness of the DCA mechanism, this study applies the most popular RTS game, Star Craft II (SC II), as the experimental game platform. In the experiments, this study uses default difficulty level AI of SC II game to emulate game players and makes these emulated game players to fight with the opponents controlled by the DCA mechanism. Additionally, a challenging rate (CR) formula is proposed as a strength evaluation between two opponents. From the experimental results, the difference of CR value of computer opponent improved by the DCA mechanism can be reduced by more than 80%. Additionally, the duration of a game match is usually double. 
Furthermore, the winner is always decided in the rear of each game match.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recently, Real-Time Strategy (RTS) games, such as Star Craft and Age of Empire, become more and more popular. Reasons of these RTS games attracting many game players are not only the fancy game presentation but also challenging game AI of computer opponents. In order to match game challenging level to different game players, these RTS games always provide several default difficulty levels for game players' choosing. However, settings of these difficulty levels cannot always accommodate challenging level requirements of different level game players. Therefore, this paper proposes a Dynamic Challenging Level Adapter (DCA) mechanism to automatically adapt computer opponent's behaviors for different game players. Each game player doesn't need to choose a difficulty level in advance and the game AI of computer opponent controlled by the DCA mechanism can dynamically adapt for meeting the challenging level of each game player. This paper proposes that the warrior is the most important element to dominate the result of each match in RTS games. Therefore, the main idea underlying the DCA mechanism is based on analyzing warrior capabilities between two game players and real-time adjusting action strategies to fight with its opponent. In order to test the effectiveness of the DCA mechanism, this study applies the most popular RTS game, Star Craft II (SC II), as the experimental game platform. In the experiments, this study uses default difficulty level AI of SC II game to emulate game players and makes these emulated game players to fight with the opponents controlled by the DCA mechanism. Additionally, a challenging rate (CR) formula is proposed as a strength evaluation between two opponents. From the experimental results, the difference of CR value of computer opponent improved by the DCA mechanism can be reduced by more than 80%. Additionally, the duration of a game match is usually double. 
Furthermore, the winner is always decided in the rear of each game match.",
"fno": "4914a030",
"keywords": [
"Games",
"Artificial Intelligence",
"Computers",
"Buildings",
"Wheels",
"Real Time Systems",
"Sociology",
"Artificial Intelligence AI",
"Challenging Rate CR",
"Challenging Level Adapter",
"Real Time Strategy RTS Game"
],
"authors": [
{
"affiliation": null,
"fullName": "Shin-Hung Chang",
"givenName": "Shin-Hung",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nai-Yan Yang",
"givenName": "Nai-Yan",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-12-01T00:00:00",
"pubType": "proceedings",
"pages": "30-35",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-5165-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4914a022",
"articleId": "12OmNzIUfMt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4914a036",
"articleId": "12OmNAYXWB6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/taai/2012/4976/0/06395051",
"title": "Pattern Formation Based on Potential Field in Real-Time Strategy Games",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2012/06395051/12OmNvDZF81",
"parentPublication": {
"id": "proceedings/taai/2012/4976/0",
"title": "2012 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2010/4359/0/4359a071",
"title": "An Artificial Intelligence System to Help the Player of Real-Time Strategy Games",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2010/4359a071/12OmNwfb6SU",
"parentPublication": {
"id": "proceedings/sbgames/2010/4359/0",
"title": "2010 Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bic-ta/2011/4514/0/4514a015",
"title": "Evolving Neural Controllers Using GA for Warcraft 3-Real Time Strategy Game",
"doi": null,
"abstractUrl": "/proceedings-article/bic-ta/2011/4514a015/12OmNxQOjxG",
"parentPublication": {
"id": "proceedings/bic-ta/2011/4514/0",
"title": "Bio-Inspired Computing: Theories and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2014/8065/0/8065a115",
"title": "Evolving the Behavior of Autonomous Agents in Strategic Combat Scenarios via SARSA Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2014/8065a115/12OmNxisQXg",
"parentPublication": {
"id": "proceedings/sbgames/2014/8065/0",
"title": "2014 Brazilian Symposium on Computer Games and Digital Entertainment (SBGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/1/3736a100",
"title": "Case Learning and Indexing in Real Time Strategy Games",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736a100/12OmNzC5Ttp",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/4",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2016/04/07782476",
"title": "Guest Editorial Real-Time Strategy Games",
"doi": null,
"abstractUrl": "/journal/ci/2016/04/07782476/13rRUwInvMS",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2014/01/06701215",
"title": "A Micromanagement Task Allocation System for Real-Time Strategy Games",
"doi": null,
"abstractUrl": "/journal/ci/2014/01/06701215/13rRUwbaqO0",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2010/02/05443495",
"title": "Towards Intelligent Team Composition and Maneuvering in Real-time Strategy Games",
"doi": null,
"abstractUrl": "/journal/ci/2010/02/05443495/13rRUwx1xJT",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2016/04/07438826",
"title": "Evolving Effective Microbehaviors in Real-Time Strategy Games",
"doi": null,
"abstractUrl": "/journal/ci/2016/04/07438826/13rRUxN5evN",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2016/01/06873276",
"title": "Predicting dominance rankings for score-based games",
"doi": null,
"abstractUrl": "/journal/ci/2016/01/06873276/13rRUy0ZzV5",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyRxFmm",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx3Zjps",
"doi": "10.1109/ICALT.2011.20",
"title": "A Location-Based Mobile Game for Business Education",
"normalizedTitle": "A Location-Based Mobile Game for Business Education",
"abstract": "Location-based games use the position of the player as a key input and take advantage of the player's physical environment, and these games offer great motivational and educational potential. In this paper, we describe the development of a location-based mobile game that can be used as a business consulting simulation for students. In the game, players take the role of consultants in a simulated project at a virtual company. Players physically move to different locations to conduct interviews with virtual characters in order to discover the weaknesses of the company and make change recommendations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Location-based games use the position of the player as a key input and take advantage of the player's physical environment, and these games offer great motivational and educational potential. In this paper, we describe the development of a location-based mobile game that can be used as a business consulting simulation for students. In the game, players take the role of consultants in a simulated project at a virtual company. Players physically move to different locations to conduct interviews with virtual characters in order to discover the weaknesses of the company and make change recommendations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Location-based games use the position of the player as a key input and take advantage of the player's physical environment, and these games offer great motivational and educational potential. In this paper, we describe the development of a location-based mobile game that can be used as a business consulting simulation for students. In the game, players take the role of consultants in a simulated project at a virtual company. Players physically move to different locations to conduct interviews with virtual characters in order to discover the weaknesses of the company and make change recommendations.",
"fno": "4346a042",
"keywords": [
"Business Game",
"Location Awareness",
"Mobile Games",
"Pervasive Games",
"Augmented Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Jean-Christophe Puja",
"givenName": "Jean-Christophe",
"surname": "Puja",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "David Parsons",
"givenName": "David",
"surname": "Parsons",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "42-44",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4346-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4346a037",
"articleId": "12OmNvD8Rz7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4346a045",
"articleId": "12OmNqFrGwo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wmute/2012/4662/0/4662a017",
"title": "Mobile Gaming - A Serious Business!",
"doi": null,
"abstractUrl": "/proceedings-article/wmute/2012/4662a017/12OmNAoUTmb",
"parentPublication": {
"id": "proceedings/wmute/2012/4662/0",
"title": "IEEE International Conference on Wireless, Mobile, and Ubiquitous Technology in Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000328",
"title": "Sensing game play. Exploring computer game play in a game café and a mass LAN party",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000328/12OmNqGiu6J",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2011/4467/0/4467a120",
"title": "Let the Game Do the Talking: The Influence of Explicitness and Game Behavior on Comprehension in an Educational Computer Game",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2011/4467a120/12OmNrAv3Dh",
"parentPublication": {
"id": "proceedings/cw/2011/4467/0",
"title": "2011 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2015/8843/0/8843a099",
"title": "An Authoring Tool for Location-Based Mobile Games with Augmented Reality Features",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2015/8843a099/12OmNwcUjWC",
"parentPublication": {
"id": "proceedings/sbgames/2015/8843/0",
"title": "2015 14th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/digitel/2010/3993/0/3993a151",
"title": "Building Your Own Tangible Virtual World: A Design of an RFID-Based Tabletop Game Platform",
"doi": null,
"abstractUrl": "/proceedings-article/digitel/2010/3993a151/12OmNxFsmzj",
"parentPublication": {
"id": "proceedings/digitel/2010/3993/0",
"title": "Digital Game and Intelligent Toy Enhanced Learning, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithingscpscom/2011/4580/0/4580a209",
"title": "Understanding Location-Based Information Sharing in a Mobile Human Computation Game",
"doi": null,
"abstractUrl": "/proceedings-article/ithingscpscom/2011/4580a209/12OmNxcdG0T",
"parentPublication": {
"id": "proceedings/ithingscpscom/2011/4580/0",
"title": "International Conference on Internet of Things and International Conference on Cyber, Physical and Social Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2009/3588/0/3588a178",
"title": "Exploring Game Leadership and Online Game Community",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2009/3588a178/12OmNyxXlkY",
"parentPublication": {
"id": "proceedings/vs-games/2009/3588/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dese/2011/4593/0/4593a431",
"title": "Game Content Model: An Ontology for Documenting Serious Game Design",
"doi": null,
"abstractUrl": "/proceedings-article/dese/2011/4593a431/12OmNzDNttK",
"parentPublication": {
"id": "proceedings/dese/2011/4593/0",
"title": "2011 Developments in E-systems Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2013/0820/0/06632618",
"title": "Asymmetric game design and player location: An empirical study on mobile play experiences",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2013/06632618/12OmNzZEAsM",
"parentPublication": {
"id": "proceedings/cgames/2013/0820/0",
"title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/02/mcg2013020006",
"title": "Game Analytics for Game User Research, Part 1: A Workshop Review and Case Study",
"doi": null,
"abstractUrl": "/magazine/cg/2013/02/mcg2013020006/13rRUxAStUD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqG0SWX",
"title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)",
"acronym": "intetain",
"groupId": "1808166",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzV70qo",
"doi": "",
"title": "ARZombie: A mobile augmented reality game with multimodal interaction",
"normalizedTitle": "ARZombie: A mobile augmented reality game with multimodal interaction",
"abstract": "Augmented reality games have the power to extend virtual gaming into real world scenarios with real people, while enhancing the senses of the user. This paper describes the AR-Zombie game developed with the aim of studying and developing mobile augmented reality applications, specifically for tablets, using face recognition interaction techniques. The goal of the ARZombie player is to kill zombies that are detected through the display of the device. Instead of using markers as a mean of tracking the zombies, this game incorporates a facial recognition system, which will enhance the user experience by improving the interaction of players with the real world. As the player moves around the environment, the game will display virtual zombies on the screen if the detected faces are recognized as belonging to the class of the zombies. ARZombie was tested with users to evaluate the interaction proposals and its components were evaluated regarding the performance in order to ensure a better gaming experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented reality games have the power to extend virtual gaming into real world scenarios with real people, while enhancing the senses of the user. This paper describes the AR-Zombie game developed with the aim of studying and developing mobile augmented reality applications, specifically for tablets, using face recognition interaction techniques. The goal of the ARZombie player is to kill zombies that are detected through the display of the device. Instead of using markers as a mean of tracking the zombies, this game incorporates a facial recognition system, which will enhance the user experience by improving the interaction of players with the real world. As the player moves around the environment, the game will display virtual zombies on the screen if the detected faces are recognized as belonging to the class of the zombies. ARZombie was tested with users to evaluate the interaction proposals and its components were evaluated regarding the performance in order to ensure a better gaming experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented reality games have the power to extend virtual gaming into real world scenarios with real people, while enhancing the senses of the user. This paper describes the AR-Zombie game developed with the aim of studying and developing mobile augmented reality applications, specifically for tablets, using face recognition interaction techniques. The goal of the ARZombie player is to kill zombies that are detected through the display of the device. Instead of using markers as a mean of tracking the zombies, this game incorporates a facial recognition system, which will enhance the user experience by improving the interaction of players with the real world. As the player moves around the environment, the game will display virtual zombies on the screen if the detected faces are recognized as belonging to the class of the zombies. ARZombie was tested with users to evaluate the interaction proposals and its components were evaluated regarding the performance in order to ensure a better gaming experience.",
"fno": "07325481",
"keywords": [
"Games",
"Face",
"Augmented Reality",
"Face Recognition",
"Face Detection",
"Cameras",
"Lighting",
"Face Detection And Recognition",
"Computers And Information Processing",
"Augmented Reality",
"Mobile Computing",
"Multimodal Interaction",
"Games"
],
"authors": [
{
"affiliation": "NOVA LINCS and Computer Science Department, Faculdade de Ciências e Tecnologia, Universidade Nova de Lisboa, Quinta da Torre, 2829-516 Caparica, Portugal",
"fullName": "Diogo Cordeiro",
"givenName": "Diogo",
"surname": "Cordeiro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NOVA LINCS and Computer Science Department, Faculdade de Ciências e Tecnologia, Universidade Nova de Lisboa, Quinta da Torre, 2829-516 Caparica, Portugal",
"fullName": "Nuno Correia",
"givenName": "Nuno",
"surname": "Correia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "M2A, ISEL - Instituto Superior de Engenharia de Lisboa, Instituto Politécnico de Lisboa, Rua Conselheiro Emidio Navarro, 1959-007 Lisboa, Portugal",
"fullName": "Rui Jesus",
"givenName": "Rui",
"surname": "Jesus",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "intetain",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-06-01T00:00:00",
"pubType": "proceedings",
"pages": "22-31",
"year": "2015",
"issn": null,
"isbn": "978-1-6319-0061-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07325480",
"articleId": "12OmNyRPgSl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07325482",
"articleId": "12OmNyQYt2r",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2011/2183/0/06162916",
"title": "“Soul Hunter”: A novel augmented reality application in theme parks",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06162916/12OmNCgrD8W",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc/2012/4843/0/4843a822",
"title": "Enhancing Traditional Games with Augmented Reality Technologies",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc/2012/4843a822/12OmNCmGNYK",
"parentPublication": {
"id": "proceedings/uic-atc/2012/4843/0",
"title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2012/4663/0/06483988",
"title": "Co-creativity fusions in interdisciplinary augmented reality game developments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2012/06483988/12OmNxdVgUM",
"parentPublication": {
"id": "proceedings/ismar-amh/2012/4663/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671809",
"title": "KITE: Platform for mobile Augmented Reality gaming and interaction using magnetic tracking and depth sensing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671809/12OmNyXMQan",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wf-iot/2014/3459/0/06803110",
"title": "Using Unity 3D to facilitate mobile augmented reality game development",
"doi": null,
"abstractUrl": "/proceedings-article/wf-iot/2014/06803110/12OmNzZWbJy",
"parentPublication": {
"id": "proceedings/wf-iot/2014/3459/0",
"title": "2014 IEEE World Forum on Internet of Things (WF-IoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239198",
"title": "ARmy: A study of multi-user interaction in spatially augmented games",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239198/12OmNzaQozd",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceee/2019/3910/0/391000a079",
"title": "Desktop Artillery Simulation Using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iceee/2019/391000a079/1cpqGEpXo5O",
"parentPublication": {
"id": "proceedings/iceee/2019/3910/0",
"title": "2019 6th International Conference on Electrical and Electronics Engineering (ICEEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a399",
"title": "A Kinect-Based Augmented Reality Game for Lower Limb Exercise",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a399/1fHknjccUP6",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090597",
"title": "AVoidX: An Augmented VR Game",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090597/1jIxmqslcwU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a024",
"title": "Catching the Drone - A Tangible Augmented Reality Game in Superhuman Sports",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a024/1pBMeMETmdW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1fHkkWQ0aEE",
"title": "2019 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1fHknjccUP6",
"doi": "10.1109/CW.2019.00077",
"title": "A Kinect-Based Augmented Reality Game for Lower Limb Exercise",
"normalizedTitle": "A Kinect-Based Augmented Reality Game for Lower Limb Exercise",
"abstract": "Augmented reality (AR) is where 3D virtual objects are integrated into a 3D real environment in real time. The augmented reality applications such as medical visualization, maintenance and repair, robot path planning, entertainment, military aircraft navigation, and targeting applications have been proposed. This paper introduces the development of an augmented reality game which allows the user to carry out lower limb exercise using a natural user interface based on Microsoft Kinect. The system has been designed as an augmented game where users can see themselves in a world augmented with virtual objects generated by computer graphics. The player sitting in a chair just has to step on a mole that appears and disappears by moving upward and downward randomly. It encourages the activities of a large number of lower limb muscles which will help prevent falls. It is also suitable for rehabilitation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented reality (AR) is where 3D virtual objects are integrated into a 3D real environment in real time. The augmented reality applications such as medical visualization, maintenance and repair, robot path planning, entertainment, military aircraft navigation, and targeting applications have been proposed. This paper introduces the development of an augmented reality game which allows the user to carry out lower limb exercise using a natural user interface based on Microsoft Kinect. The system has been designed as an augmented game where users can see themselves in a world augmented with virtual objects generated by computer graphics. The player sitting in a chair just has to step on a mole that appears and disappears by moving upward and downward randomly. It encourages the activities of a large number of lower limb muscles which will help prevent falls. It is also suitable for rehabilitation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented reality (AR) is where 3D virtual objects are integrated into a 3D real environment in real time. The augmented reality applications such as medical visualization, maintenance and repair, robot path planning, entertainment, military aircraft navigation, and targeting applications have been proposed. This paper introduces the development of an augmented reality game which allows the user to carry out lower limb exercise using a natural user interface based on Microsoft Kinect. The system has been designed as an augmented game where users can see themselves in a world augmented with virtual objects generated by computer graphics. The player sitting in a chair just has to step on a mole that appears and disappears by moving upward and downward randomly. It encourages the activities of a large number of lower limb muscles which will help prevent falls. It is also suitable for rehabilitation.",
"fno": "229700a399",
"keywords": [
"Augmented Reality",
"Computer Games",
"Computer Graphics",
"Muscle",
"Patient Rehabilitation",
"User Interfaces",
"Kinect Based Augmented Reality Game",
"Lower Limb Exercise",
"3 D Virtual Objects",
"Augmented Reality Applications",
"Medical Visualization",
"Robot Path Planning",
"Military Aircraft Navigation",
"Natural User Interface",
"Microsoft Kinect",
"Lower Limb Muscles",
"Games",
"Augmented Reality",
"Art",
"Maintenance Engineering",
"Mirrors",
"Visualization",
"Coordinate Measuring Machines",
"Augmented Reality",
"Lower Limb Exercise",
"Rehabilitation",
"Healthcare"
],
"authors": [
{
"affiliation": "Tokyo Polytechnic University",
"fullName": "Yoshimasa Tokuyama",
"givenName": "Yoshimasa",
"surname": "Tokuyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tainan National University of the Arts",
"fullName": "R.P.C. Janaka Rajapakse",
"givenName": "R.P.C. Janaka",
"surname": "Rajapakse",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hitachi Industry & Control Solutions, Ltd.",
"fullName": "Sachiyo Yamabe",
"givenName": "Sachiyo",
"surname": "Yamabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iwate University",
"fullName": "Kouichi Konno",
"givenName": "Kouichi",
"surname": "Konno",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tainan National University of the Arts",
"fullName": "Yi-Ping Hung",
"givenName": "Yi-Ping",
"surname": "Hung",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "399-402",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-2297-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "229700a395",
"articleId": "1fHklKxvwJ2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "229700a403",
"articleId": "1fHknpU3gnC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-amh/2010/9339/0/05643298",
"title": "OutRun: Exploring seamful design in the development of an augmented reality art project",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2010/05643298/12OmNBSjIW4",
"parentPublication": {
"id": "proceedings/ismar-amh/2010/9339/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2010/6331/0/05460103",
"title": "Augmented Reality Games for Upper-Limb Stroke Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2010/05460103/12OmNwoxSda",
"parentPublication": {
"id": "proceedings/vs-games/2010/6331/0",
"title": "2010 2nd International Conference on Games and Virtual Worlds for Serious Applications (VS-GAMES 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wf-iot/2014/3459/0/06803144",
"title": "Short paper: Calory Battle AR: An extensible mobile augmented reality exergame platform",
"doi": null,
"abstractUrl": "/proceedings-article/wf-iot/2014/06803144/12OmNx9nGF5",
"parentPublication": {
"id": "proceedings/wf-iot/2014/3459/0",
"title": "2014 IEEE World Forum on Internet of Things (WF-IoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2012/4663/0/06483988",
"title": "Co-creativity fusions in interdisciplinary augmented reality game developments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2012/06483988/12OmNxdVgUM",
"parentPublication": {
"id": "proceedings/ismar-amh/2012/4663/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2015/9628/0/9628a021",
"title": "Augmented \"Ouch!\". How to Create Intersubjective Augmented Objects into Which We can Bump",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a021/12OmNy5hRig",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2015/9628/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/culture-computing/2015/8232/0/8232a041",
"title": "\"Third-Person\" Augmented Reality-Based Interactive Chinese Drama",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2015/8232a041/12OmNy6Zs20",
"parentPublication": {
"id": "proceedings/culture-computing/2015/8232/0",
"title": "2015 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sasow/2015/8439/0/8439a080",
"title": "Computational Fields Meet Augmented Reality: Perspectives and Challenges",
"doi": null,
"abstractUrl": "/proceedings-article/sasow/2015/8439a080/12OmNyuPKTK",
"parentPublication": {
"id": "proceedings/sasow/2015/8439/0",
"title": "2015 IEEE International Conference on Self-Adaptive and Self-Organizing Systems Workshops (SASOW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2018/8161/0/08633693",
"title": "Extending Upper Limb User Interactions in AR, VR and MR Headsets Employing a Custom-Made Wearable Device",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2018/08633693/17D45WYQJ59",
"parentPublication": {
"id": "proceedings/iisa/2018/8161/0",
"title": "2018 9th International Conference on Information, Intelligence, Systems and Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a576",
"title": "Magic Mirror on the Wall: Reflecting the Realities of Lower Limb Rehabilitation in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a576/1J7WeNTcphe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a217",
"title": "Lower Limb Balance Rehabilitation of Post-stroke Patients Using an Evaluating and Training Combined Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a217/1pBMhnkqb04",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1vg7AGzvxNC",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1vg7QG06UcE",
"doi": "10.1109/ICVRV51359.2020.00092",
"title": "Human-Object Interaction in AR",
"normalizedTitle": "Human-Object Interaction in AR",
"abstract": "With the developments of augmented reality, there are more and more electronic games which take the application of augmented reality. However, a lot of these games lack the influence made by realistic scenes, and connection between realistic objects and the process of games. So the project provides a solution regard to design human-object interaction and game content. Besides, the paper introduces some ideas about the combination between human-object interaction with various game factors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the developments of augmented reality, there are more and more electronic games which take the application of augmented reality. However, a lot of these games lack the influence made by realistic scenes, and connection between realistic objects and the process of games. So the project provides a solution regard to design human-object interaction and game content. Besides, the paper introduces some ideas about the combination between human-object interaction with various game factors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the developments of augmented reality, there are more and more electronic games which take the application of augmented reality. However, a lot of these games lack the influence made by realistic scenes, and connection between realistic objects and the process of games. So the project provides a solution regard to design human-object interaction and game content. Besides, the paper introduces some ideas about the combination between human-object interaction with various game factors.",
"fno": "049700a340",
"keywords": [
"Augmented Reality",
"Computer Games",
"Human Computer Interaction",
"Human Object Interaction",
"Augmented Reality",
"Electronic Games",
"Realistic Scenes",
"Realistic Objects",
"Game Content",
"Game Factors",
"AR",
"Visualization",
"Games",
"Augmented Reality",
"Electronic Games",
"Human Object Interaction",
"Virtual Scene",
"Realistic Scene",
"Emotional Experience"
],
"authors": [
{
"affiliation": "Yanshan University College of Information Science and Engineering The Key Laboratory for Computer Virtual Technology and System Integration of Hebei Province,Qinhuangdao,China",
"fullName": "Yuhan Liu",
"givenName": "Yuhan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yanshan University College of Information Science and Engineering The Key Laboratory for Computer Virtual Technology and System Integration of Hebei Province,Qinhuangdao,China",
"fullName": "Yong Tang",
"givenName": "Yong",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yanshan University College of Information Science and Engineering,Qinhuangdao,China",
"fullName": "Jiangtao Li",
"givenName": "Jiangtao",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yanshan University College of Information Science and Engineering The Key Laboratory for Computer Virtual Technology and System Integration of Hebei Province,Qinhuangdao,China",
"fullName": "Jing Zhao",
"givenName": "Jing",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yanshan University College of Information Science and Engineering The Key Laboratory for Computer Virtual Technology and System Integration of Hebei Province,Qinhuangdao,China",
"fullName": "Mengya Lv",
"givenName": "Mengya",
"surname": "Lv",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yanshan University College of Information Science and Engineering The Key Laboratory for Computer Virtual Technology and System Integration of Hebei Province,Qinhuangdao,China",
"fullName": "Qian Sun",
"givenName": "Qian",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "340-341",
"year": "2020",
"issn": "2375-141X",
"isbn": "978-1-6654-0497-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1vg7PNO93wY",
"name": "picvrv202004970-09479831s1-mm_049700a340.zip",
"size": "73.4 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/picvrv202004970-09479831s1-mm_049700a340.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "049700a338",
"articleId": "1vg82bCOiUU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "049700a342",
"articleId": "1vg7TbTA6ju",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-amh/2010/9339/0/05643296",
"title": "An integrated design flow in user interface and interaction for enhancing mobile AR gaming experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2010/05643296/12OmNBE7Moa",
"parentPublication": {
"id": "proceedings/ismar-amh/2010/9339/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a159",
"title": "Evaluating the User Experience of Adult Users in Pok&#xe9;mon GO Game",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a159/12OmNBUS78I",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc/2012/4843/0/4843a822",
"title": "Enhancing Traditional Games with Augmented Reality Technologies",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc/2012/4843a822/12OmNCmGNYK",
"parentPublication": {
"id": "proceedings/uic-atc/2012/4843/0",
"title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/segah/2014/4823/0/07067087",
"title": "The Mobile RehApp™: an AR-based mobile game for ankle sprain rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/segah/2014/07067087/12OmNwIHozP",
"parentPublication": {
"id": "proceedings/segah/2014/4823/0",
"title": "2014 IEEE 3rd International Conference on Serious Games and Applications for Health (SeGAH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wf-iot/2014/3459/0/06803144",
"title": "Short paper: Calory Battle AR: An extensible mobile augmented reality exergame platform",
"doi": null,
"abstractUrl": "/proceedings-article/wf-iot/2014/06803144/12OmNx9nGF5",
"parentPublication": {
"id": "proceedings/wf-iot/2014/3459/0",
"title": "2014 IEEE World Forum on Internet of Things (WF-IoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2013/2945/0/06671261",
"title": "Kaidan: An outdoor AR puzzle adventure game",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2013/06671261/12OmNyQph1n",
"parentPublication": {
"id": "proceedings/ismar-amh/2013/2945/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728909",
"title": "Argotrainer — Learning go in an augmented-reality environment",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728909/12OmNzTYBVa",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/intetain/2015/0061/0/07325481",
"title": "ARZombie: A mobile augmented reality game with multimodal interaction",
"doi": null,
"abstractUrl": "/proceedings-article/intetain/2015/07325481/12OmNzV70qo",
"parentPublication": {
"id": "proceedings/intetain/2015/0061/0",
"title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239198",
"title": "ARmy: A study of multi-user interaction in spatially augmented games",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239198/12OmNzaQozd",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/04/mcg2008040040",
"title": "Toward Next-Gen Mobile AR Games",
"doi": null,
"abstractUrl": "/magazine/cg/2008/04/mcg2008040040/13rRUxASujW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzmcm0b",
"doi": "10.1109/VR.2016.7504743",
"title": "Estimation of detection thresholds for audiovisual rotation gains",
"normalizedTitle": "Estimation of detection thresholds for audiovisual rotation gains",
"abstract": "Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of the visuals used to represent the virtual environment. We describe a within-subjects study (n=31) exploring if participants' ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of the visuals used to represent the virtual environment. We describe a within-subjects study (n=31) exploring if participants' ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of the visuals used to represent the virtual environment. We describe a within-subjects study (n=31) exploring if participants' ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.",
"fno": "07504743",
"keywords": [
"Visualization",
"Virtual Environments",
"Electronic Mail",
"Estimation",
"Legged Locomotion",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality",
"H 1 2 Information Systems User Machine Systems Human Factors"
],
"authors": [
{
"affiliation": "Aalborg University",
"fullName": "Niels Christian Nilsson",
"givenName": "Niels Christian",
"surname": "Nilsson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Evan Suma",
"givenName": "Evan",
"surname": "Suma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University",
"fullName": "Rolf Nordahl",
"givenName": "Rolf",
"surname": "Nordahl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Mark Bolas",
"givenName": "Mark",
"surname": "Bolas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University",
"fullName": "Stefania Serafin",
"givenName": "Stefania",
"surname": "Serafin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "241-242",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504742",
"articleId": "12OmNyUFg0I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504744",
"articleId": "12OmNyKJiB6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446062",
"title": "Biomechanical Parameters Under Curvature Gains and Bending Gains in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446062/13bd1fKQxrR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260943",
"title": "You Spin my Head Right Round: Threshold of Limited Immersion for Rotation Gains in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260943/13rRUNvyato",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09736631",
"title": "On Rotation Gains Within and Beyond Perceptual Limitations for Seated VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09736631/1BN1UtLinTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a620",
"title": "Design of Mentally and Physically Demanding Tasks as Distractors of Rotation Gains",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a620/1CJdavNhwAw",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797976",
"title": "Estimation of Detection Thresholds for Redirected Turning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797976/1cJ0Y99SR1K",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798117",
"title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794563",
"title": "Estimation of Rotation Gain Thresholds Considering FOV, Gender, and Distractors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794563/1dNHkjixhDi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089444",
"title": "Detection Thresholds for Vertical Gains in VR and Drone-based Telepresence Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089444/1jIxcBQWkyA",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a358",
"title": "Revisiting Audiovisual Rotation Gains for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a358/1tnXe22MFJm",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1ftOBCZ",
"doi": "10.1109/VR.2018.8446521",
"title": "Extended Abstract: Natural Human-Robot Interaction in Virtual Reality Telepresence Systems",
"normalizedTitle": "Extended Abstract: Natural Human-Robot Interaction in Virtual Reality Telepresence Systems",
"abstract": "Telepresence systems have the potential to overcome limits and distance constraints of the real-world by enabling people to remotely visit and interact with each other. However, current telepresence systems usually lack natural ways of supporting interaction and exploration of remote environments (REs). In particular, single we-bcams for capturing the RE provide only a limited illusion of spatial presence and movement control of mobile platforms in today's telepresence systems are often restricted to simple interaction devices. One of the main challenges of telepresence systems is to allow users to explore a RE in an immersive, intuitive and natural way, e. g. real walking in the user's local environment (LE), and thus controlling motions of the robot platform in the RE. The goal of the presented research project is to meet these challenges, and contribute to the development and evaluation of novel telep-resence system and interactive behaviours in 360° virtual environments with a focus on full-view telepresence, spatial perception, locomotion, usability and motion sickness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Telepresence systems have the potential to overcome limits and distance constraints of the real-world by enabling people to remotely visit and interact with each other. However, current telepresence systems usually lack natural ways of supporting interaction and exploration of remote environments (REs). In particular, single we-bcams for capturing the RE provide only a limited illusion of spatial presence and movement control of mobile platforms in today's telepresence systems are often restricted to simple interaction devices. One of the main challenges of telepresence systems is to allow users to explore a RE in an immersive, intuitive and natural way, e. g. real walking in the user's local environment (LE), and thus controlling motions of the robot platform in the RE. The goal of the presented research project is to meet these challenges, and contribute to the development and evaluation of novel telep-resence system and interactive behaviours in 360° virtual environments with a focus on full-view telepresence, spatial perception, locomotion, usability and motion sickness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Telepresence systems have the potential to overcome limits and distance constraints of the real-world by enabling people to remotely visit and interact with each other. However, current telepresence systems usually lack natural ways of supporting interaction and exploration of remote environments (REs). In particular, single we-bcams for capturing the RE provide only a limited illusion of spatial presence and movement control of mobile platforms in today's telepresence systems are often restricted to simple interaction devices. One of the main challenges of telepresence systems is to allow users to explore a RE in an immersive, intuitive and natural way, e. g. real walking in the user's local environment (LE), and thus controlling motions of the robot platform in the RE. The goal of the presented research project is to meet these challenges, and contribute to the development and evaluation of novel telep-resence system and interactive behaviours in 360° virtual environments with a focus on full-view telepresence, spatial perception, locomotion, usability and motion sickness.",
"fno": "08446521",
"keywords": [
"Control Engineering Computing",
"Human Robot Interaction",
"Interactive Systems",
"Mobile Robots",
"Telecontrol",
"Telerobotics",
"User Interfaces",
"Virtual Reality",
"Natural Human Robot Interaction",
"Virtual Reality Telepresence Systems",
"Remote Environments",
"Movement Control",
"Interactive Behaviours",
"Full View Telepresence",
"Interaction Devices",
"Spatial Perception",
"Mobile Platforms",
"Telepresence",
"Legged Locomotion",
"Cameras",
"Robot Vision Systems",
"Task Analysis",
"Virtual Reality",
"Telepresence",
"Interaction",
"Locomotion"
],
"authors": [
{
"affiliation": "Department of Informatics, University of Hamburg, Germany",
"fullName": "Jingxin Zhang",
"givenName": "Jingxin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "812-813",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446466",
"articleId": "13bd1fdV4lE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446133",
"articleId": "13bd1gJ1v06",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/apscc/2011/4624/0/4624a327",
"title": "Interoperable Telepresence Services: Beyond HD-Videoconferences and Towards Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/apscc/2011/4624a327/12OmNCcKQI5",
"parentPublication": {
"id": "proceedings/apscc/2011/4624/0",
"title": "2011 IEEE Asia -Pacific Services Computing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130239",
"title": "Calibrating dynamic pedestrian route choice with an Extended Range Telepresence System",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130239/12OmNvjgWA3",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08314105",
"title": "Detection Thresholds for Rotation and Translation Gains in 360° Video-Based Telepresence Systems",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08314105/13rRUxASubD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a624",
"title": "AS-EKF: A Delay Aware State Estimation Technique for Telepresence Robot Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a624/18M7h2cgDDO",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aqtr/2022/7933/0/09801925",
"title": "Telepresence Robot for Exploring Protected Natural Areas",
"doi": null,
"abstractUrl": "/proceedings-article/aqtr/2022/09801925/1ErqUQ1knE4",
"parentPublication": {
"id": "proceedings/aqtr/2022/7933/0",
"title": "2022 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a524",
"title": "Synthesizing Novel Spaces for Remote Telepresence Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a524/1J7WaFB7xNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a718",
"title": "How Far is It? Distance Estimation and Reproduction Through a Double 3 Telepresence Robot",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a718/1J7Wq3RYsx2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089444",
"title": "Detection Thresholds for Vertical Gains in VR and Drone-based Telepresence Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089444/1jIxcBQWkyA",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aqtr/2020/7164/0/09129994",
"title": "Solutions for the design and control of telepresence robots that climb obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/aqtr/2020/09129994/1l6SGBqYCbu",
"parentPublication": {
"id": "proceedings/aqtr/2020/7164/0",
"title": "2020 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a346",
"title": "Tactile Telepresence for Isolated Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a346/1yeQGRM0HLi",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxcBQWkyA",
"doi": "10.1109/VR46266.2020.00028",
"title": "Detection Thresholds for Vertical Gains in VR and Drone-based Telepresence Systems",
"normalizedTitle": "Detection Thresholds for Vertical Gains in VR and Drone-based Telepresence Systems",
"abstract": "Several redirected walking techniques have been introduced and analyzed in recent years, while the main focus was on manipulations in horizontal directions, in particular, by means of curvature, rotation, and translation gains. However, less research has been conducted on the manipulation of vertical movements and its possible use as a redirection technique. Actually, vertical movements are fundamentally important, e.g., for remotely steering a drone using a virtual reality headset.In this paper, we explored vertical gains, a novel redirection technique, which enables us to purposefully manipulate the mapping of the user’s physical vertical movements to movements in the virtual space and the remote space. This approach allows natural and more active physical control of a real drone. To demonstrate the usability of vertical gains, we implemented a telepresence drone and vertical redirection techniques for stretching and crouching actions using common VR devices. We conducted two user studies to investigate the effective manipulation ranges and its usability: one study using a virtual environment (VE), and one using a camera stream from a telepresence drone. The results revealed that our technique could manipulate a users vertical movement without her/his noticing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Several redirected walking techniques have been introduced and analyzed in recent years, while the main focus was on manipulations in horizontal directions, in particular, by means of curvature, rotation, and translation gains. However, less research has been conducted on the manipulation of vertical movements and its possible use as a redirection technique. Actually, vertical movements are fundamentally important, e.g., for remotely steering a drone using a virtual reality headset.In this paper, we explored vertical gains, a novel redirection technique, which enables us to purposefully manipulate the mapping of the user’s physical vertical movements to movements in the virtual space and the remote space. This approach allows natural and more active physical control of a real drone. To demonstrate the usability of vertical gains, we implemented a telepresence drone and vertical redirection techniques for stretching and crouching actions using common VR devices. We conducted two user studies to investigate the effective manipulation ranges and its usability: one study using a virtual environment (VE), and one using a camera stream from a telepresence drone. The results revealed that our technique could manipulate a users vertical movement without her/his noticing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Several redirected walking techniques have been introduced and analyzed in recent years, while the main focus was on manipulations in horizontal directions, in particular, by means of curvature, rotation, and translation gains. However, less research has been conducted on the manipulation of vertical movements and its possible use as a redirection technique. Actually, vertical movements are fundamentally important, e.g., for remotely steering a drone using a virtual reality headset.In this paper, we explored vertical gains, a novel redirection technique, which enables us to purposefully manipulate the mapping of the user’s physical vertical movements to movements in the virtual space and the remote space. This approach allows natural and more active physical control of a real drone. To demonstrate the usability of vertical gains, we implemented a telepresence drone and vertical redirection techniques for stretching and crouching actions using common VR devices. We conducted two user studies to investigate the effective manipulation ranges and its usability: one study using a virtual environment (VE), and one using a camera stream from a telepresence drone. The results revealed that our technique could manipulate a users vertical movement without her/his noticing.",
"fno": "09089444",
"keywords": [
"Drones",
"Telepresence",
"Legged Locomotion",
"Resists",
"Virtual Reality",
"Aerospace Electronics",
"Three Dimensional Displays",
"Drone",
"Vertical Movement",
"Redirection",
"Telepresence"
],
"authors": [
{
"affiliation": "The University of Tokyo,Cyber Interface Lab.",
"fullName": "Keigo Matsumoto",
"givenName": "Keigo",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Hamburg,Human-Computer Interaction",
"fullName": "Eike Langbehn",
"givenName": "Eike",
"surname": "Langbehn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Cyber Interface Lab.",
"fullName": "Takuji Narumi",
"givenName": "Takuji",
"surname": "Narumi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Hamburg,Human-Computer Interaction",
"fullName": "Frank Steinicke",
"givenName": "Frank",
"surname": "Steinicke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "101-107",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089519",
"articleId": "1jIxelz8ZMs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089599",
"articleId": "1jIx8SwZIuQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2018/4886/0/488601c010",
"title": "Delay Compensation for Actuated Stereoscopic 360 Degree Telepresence Systems with Probabilistic Head Motion Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601c010/12OmNy68EOY",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504743",
"title": "Estimation of detection thresholds for audiovisual rotation gains",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504743/12OmNzmcm0b",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446521",
"title": "Extended Abstract: Natural Human-Robot Interaction in Virtual Reality Telepresence Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446521/13bd1ftOBCZ",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260943",
"title": "You Spin my Head Right Round: Threshold of Limited Immersion for Rotation Gains in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260943/13rRUNvyato",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08314105",
"title": "Detection Thresholds for Rotation and Translation Gains in 360° Video-Based Telepresence Systems",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08314105/13rRUxASubD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642375",
"title": "Immersive Telepresence and Remote Collaboration using Mobile and Wearable Devices",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642375/17PYEk3WIil",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09736631",
"title": "On Rotation Gains Within and Beyond Perceptual Limitations for Seated VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09736631/1BN1UtLinTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798143",
"title": "Estimating Detection Thresholds for Desktop-Scale Hand Redirection in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798143/1cJ0GRxSQwM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798367",
"title": "Falconer: A Tethered Aerial Companion for Enhancing Personal Space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798367/1cJ0HDC3pa8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797989",
"title": "Redirected Jumping: Imperceptibly Manipulating Jump Motions in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797989/1cJ15zHucrC",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1l6SEkhebMk",
"title": "2020 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)",
"acronym": "aqtr",
"groupId": "1001746",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1l6SGBqYCbu",
"doi": "10.1109/AQTR49680.2020.9129994",
"title": "Solutions for the design and control of telepresence robots that climb obstacles",
"normalizedTitle": "Solutions for the design and control of telepresence robots that climb obstacles",
"abstract": "Telepresence robots are useful in various fields such as education, health or remote exploration of certain territories. In our previous research, we have developed telepresence robots capable of moving using wheels and being able to communicate data via Bluetooth or direct Wi-Fi. Problems arise with these applications when the robot must climb stairs, move on rough terrain or transmit data over longer distances. In this paper we come up with solutions for both the mechanical design part and the distance data exchange and remote control. Currently, there are robots on the market that move either with wheels, with feet or with wheels included in the feet. Our innovative solution is to create a robot that has both wheels and legs, which they can use according to their needs. For data communications we have two solutions: Wi-Fi and Li-Fi communications in areas where Wi-Fi communications are prohibited. Such telepresence robots will be able to be used in the transport and monitoring of persons with disabilities. Further research will focus on optimizing the proposed solutions and on the autonomy of the robots.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Telepresence robots are useful in various fields such as education, health or remote exploration of certain territories. In our previous research, we have developed telepresence robots capable of moving using wheels and being able to communicate data via Bluetooth or direct Wi-Fi. Problems arise with these applications when the robot must climb stairs, move on rough terrain or transmit data over longer distances. In this paper we come up with solutions for both the mechanical design part and the distance data exchange and remote control. Currently, there are robots on the market that move either with wheels, with feet or with wheels included in the feet. Our innovative solution is to create a robot that has both wheels and legs, which they can use according to their needs. For data communications we have two solutions: Wi-Fi and Li-Fi communications in areas where Wi-Fi communications are prohibited. Such telepresence robots will be able to be used in the transport and monitoring of persons with disabilities. Further research will focus on optimizing the proposed solutions and on the autonomy of the robots.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Telepresence robots are useful in various fields such as education, health or remote exploration of certain territories. In our previous research, we have developed telepresence robots capable of moving using wheels and being able to communicate data via Bluetooth or direct Wi-Fi. Problems arise with these applications when the robot must climb stairs, move on rough terrain or transmit data over longer distances. In this paper we come up with solutions for both the mechanical design part and the distance data exchange and remote control. Currently, there are robots on the market that move either with wheels, with feet or with wheels included in the feet. Our innovative solution is to create a robot that has both wheels and legs, which they can use according to their needs. For data communications we have two solutions: Wi-Fi and Li-Fi communications in areas where Wi-Fi communications are prohibited. Such telepresence robots will be able to be used in the transport and monitoring of persons with disabilities. Further research will focus on optimizing the proposed solutions and on the autonomy of the robots.",
"fno": "09129994",
"keywords": [
"Bluetooth",
"Collision Avoidance",
"Control Engineering Computing",
"Human Robot Interaction",
"Telerobotics",
"Wheels",
"Wireless LAN",
"Telepresence Robots",
"Rough Terrain",
"Distance Data Exchange",
"Data Communications",
"Li Fi Communications",
"Wi Fi Communications",
"Legged Locomotion",
"Telepresence",
"Wheelchairs",
"Wheels",
"Stairs",
"Mobile Robots",
"Robots",
"Design",
"Control",
"Telepresence",
"Walking",
"Robot"
],
"authors": [
{
"affiliation": "Faculty of Electronics, Telecommunications and Information Technologies, Technical University from Cluj-Napoca,Cluj-Napoca,Romania",
"fullName": "Paul Ţoţa",
"givenName": "Paul",
"surname": "Ţoţa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Electronics, Telecommunications and Information Technologies, Technical University from Cluj-Napoca,Cluj-Napoca,Romania",
"fullName": "Mircea-F. Vaida",
"givenName": "Mircea-F.",
"surname": "Vaida",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aqtr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7164-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09129943",
"articleId": "1l6SJzdG8Hm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09129950",
"articleId": "1l6SIVClTzi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/aqtr/2018/2205/0/08402729",
"title": "Design and development of the hybrid mobile robots",
"doi": null,
"abstractUrl": "/proceedings-article/aqtr/2018/08402729/12OmNAOKnQC",
"parentPublication": {
"id": "proceedings/aqtr/2018/2205/0",
"title": "2018 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a793",
"title": "A Ubiquitous Computing Platform - Affordable Telepresence Robot Design and Applications",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a793/12OmNAnuTr1",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a319",
"title": "A Study to Design VI Classrooms Using Virtual Reality Aided Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a319/12OmNxvwoOG",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aqtr/2022/7933/0/09801925",
"title": "Telepresence Robot for Exploring Protected Natural Areas",
"doi": null,
"abstractUrl": "/proceedings-article/aqtr/2022/09801925/1ErqUQ1knE4",
"parentPublication": {
"id": "proceedings/aqtr/2022/7933/0",
"title": "2022 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a524",
"title": "Synthesizing Novel Spaces for Remote Telepresence Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a524/1J7WaFB7xNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2022/7260/0/726000a103",
"title": "State Estimation for Hybrid Locomotion of Driving-Stepping Quadrupeds",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2022/726000a103/1Kckhe8Ichi",
"parentPublication": {
"id": "proceedings/irc/2022/7260/0",
"title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crc/2019/4620/0/462000a075",
"title": "Autonomous Step Climbing Strategy Using a Wheelchair and Care Robot",
"doi": null,
"abstractUrl": "/proceedings-article/crc/2019/462000a075/1iTuIBfdsDS",
"parentPublication": {
"id": "proceedings/crc/2019/4620/0",
"title": "2019 4th International Conference on Control, Robotics and Cybernetics (CRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeas/2020/9272/0/927200a217",
"title": "A novel design of the intelligent stair-climbing wheelchair",
"doi": null,
"abstractUrl": "/proceedings-article/icmeas/2020/927200a217/1rsiDJt7HaM",
"parentPublication": {
"id": "proceedings/icmeas/2020/9272/0",
"title": "2020 6th International Conference on Mechanical Engineering and Automation Science (ICMEAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a661",
"title": "A Telepresence System using Toy Robots the Users can Assemble and Manipulate with Finger Plays and Hand Shadow",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a661/1tnXrE2tyoM",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/04/09537616",
"title": "TIUI: Touching Live Video for Telepresence Operation",
"doi": null,
"abstractUrl": "/journal/tm/2023/04/09537616/1wTinsFrkju",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzcxZeL",
"title": "2014 Brazilian Symposium on Computer Games and Digital Entertainment (SBGAMES)",
"acronym": "sbgames",
"groupId": "1800056",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvSKNTy",
"doi": "10.1109/SBGAMES.2014.15",
"title": "Expressive Reproduced Characters for Games",
"normalizedTitle": "Expressive Reproduced Characters for Games",
"abstract": "Virtual tridimensional creatures are active actors in many types of games. Some of these games require, in addition to quantity, the simulation of kinship and evolution, not only of human character models but also of different types of animals, toon models or other creatures. Some applications also require interactions between isolated populations with well-defined ethnic characteristics. The identification of similar traits between individuals of the same family is crucial to providing increased realism. The main difficulty in these situations is to generate models automatically, in real time, which are physically similar to a given population or family. Another desirable feature is the automatic generation of facial expressions as the character interacts with the environment. In those cases, the difficulty lies in finding a simple mesh adaptation system for different creatures with big differences in shape. In this work, the reproduction of diploid beings is mimicked to produce character models that inherit genetic characteristics from their ancestors, with the possibility to map all genes identifying the origin of each gene, and apply custom facial expressions. With this solution it is possible to create interactive evolution and life-simulation games, genetic educational applications, and many other possibilities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual tridimensional creatures are active actors in many types of games. Some of these games require, in addition to quantity, the simulation of kinship and evolution, not only of human character models but also of different types of animals, toon models or other creatures. Some applications also require interactions between isolated populations with well-defined ethnic characteristics. The identification of similar traits between individuals of the same family is crucial to providing increased realism. The main difficulty in these situations is to generate models automatically, in real time, which are physically similar to a given population or family. Another desirable feature is the automatic generation of facial expressions as the character interacts with the environment. In those cases, the difficulty lies in finding a simple mesh adaptation system for different creatures with big differences in shape. In this work, the reproduction of diploid beings is mimicked to produce character models that inherit genetic characteristics from their ancestors, with the possibility to map all genes identifying the origin of each gene, and apply custom facial expressions. With this solution it is possible to create interactive evolution and life-simulation games, genetic educational applications, and many other possibilities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual tridimensional creatures are active actors in many types of games. Some of these games require, in addition to quantity, the simulation of kinship and evolution, not only of human character models but also of different types of animals, toon models or other creatures. Some applications also require interactions between isolated populations with well-defined ethnic characteristics. The identification of similar traits between individuals of the same family is crucial to providing increased realism. The main difficulty in these situations is to generate models automatically, in real time, which are physically similar to a given population or family. Another desirable feature is the automatic generation of facial expressions as the character interacts with the environment. In those cases, the difficulty lies in finding a simple mesh adaptation system for different creatures with big differences in shape. In this work, the reproduction of diploid beings is mimicked to produce character models that inherit genetic characteristics from their ancestors, with the possibility to map all genes identifying the origin of each gene, and apply custom facial expressions. With this solution it is possible to create interactive evolution and life-simulation games, genetic educational applications, and many other possibilities.",
"fno": "8065a210",
"keywords": [
"Solid Modeling",
"Biological Cells",
"Genetics",
"Adaptation Models",
"Face",
"Skin",
"Games",
"Facial Expression",
"Virtual Characters",
"Reproduction Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Roberto Cesar Cavalcante Vieira",
"givenName": "Roberto Cesar Cavalcante",
"surname": "Vieira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Creto Augusto Vidal",
"givenName": "Creto Augusto",
"surname": "Vidal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Joaquim Bento Cavalcante-Neto",
"givenName": "Joaquim Bento",
"surname": "Cavalcante-Neto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sbgames",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-11-01T00:00:00",
"pubType": "proceedings",
"pages": "210-219",
"year": "2014",
"issn": "2159-6662",
"isbn": "978-1-4799-8065-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8065a202",
"articleId": "12OmNxTEiQ2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8065a220",
"articleId": "12OmNzX6cpM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icnc/2008/3304/6/3304f032",
"title": "Emotion-Based Synthetic Characters in Games",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2008/3304f032/12OmNBCqbIX",
"parentPublication": {
"id": "proceedings/icnc/2008/3304/6",
"title": "2008 Fourth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2017/5812/0/08056604",
"title": "Expressive virtual characters for social demonstration games",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056604/12OmNBpmDNS",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2012/4779/0/4779a889",
"title": "An Authoring Tool for Flash Games in ActionScript3.0",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2012/4779a889/12OmNqOffuZ",
"parentPublication": {
"id": "proceedings/nbis/2012/4779/0",
"title": "2012 15th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/passat-socialcom/2011/1931/0/06113103",
"title": "Coevolving Strategies in Social-Elimination Games",
"doi": null,
"abstractUrl": "/proceedings-article/passat-socialcom/2011/06113103/12OmNrJiCN9",
"parentPublication": {
"id": "proceedings/passat-socialcom/2011/1931/0",
"title": "2011 IEEE Third Int'l Conference on Privacy, Security, Risk and Trust (PASSAT) / 2011 IEEE Third Int'l Conference on Social Computing (SocialCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444803",
"title": "Simulation of genetic inheritance in the generation of virtual characters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444803/12OmNyxXlwe",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2011/4501/0/4501b049",
"title": "The Application of AI for the Non Player Character in Computer Games",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2011/4501b049/12OmNzayNtp",
"parentPublication": {
"id": "proceedings/iccis/2011/4501/0",
"title": "2011 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08267290",
"title": "The Effect of Realistic Appearance of Virtual Characters in Immersive Environments - Does the Character's Personality Play a Role?",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08267290/13rRUwghd57",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020271",
"title": "Ethical Issues in Automatic Dialogue Generation for Non-Player Characters in Digital Games",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020271/1KfSVv5qwvu",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a458",
"title": "Play with Emotional Characters: Improving User Emotional Experience by A Data-driven Approach in VR Volleyball Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a458/1tnWZju755K",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2021/0189/0/018900a182",
"title": "Two Level Control of Non-Player Characters for Navigation in 3D Games Scenes: A Deep Reinforcement Learning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2021/018900a182/1zusrnO528M",
"parentPublication": {
"id": "proceedings/sbgames/2021/0189/0",
"title": "2021 20th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvjyxwr",
"doi": "10.1109/VR.2015.7223346",
"title": "An evaluation of virtual human appearance fidelity on user's positive and negative affect in human-virtual human interaction",
"normalizedTitle": "An evaluation of virtual human appearance fidelity on user's positive and negative affect in human-virtual human interaction",
"abstract": "The effectiveness of visual realism of virtual characters in engaging users and eliciting affective responses has been an open question. We empirically evaluated the effects of realistic vs. non-realistic rendering of virtual humans on the emotional response of participants in a medical virtual reality system that was designed to educate users to recognize the signs and symptoms of patient deterioration. In a between-subjects experiment protocol, participants interacted with one of three different appearances of a virtual patient, namely realistic, non-realistic cartoon-shaded and charcoal-sketch like conditions. Emotional impact of the rendering conditions was measured via a combination of subjective and objective metrics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The effectiveness of visual realism of virtual characters in engaging users and eliciting affective responses has been an open question. We empirically evaluated the effects of realistic vs. non-realistic rendering of virtual humans on the emotional response of participants in a medical virtual reality system that was designed to educate users to recognize the signs and symptoms of patient deterioration. In a between-subjects experiment protocol, participants interacted with one of three different appearances of a virtual patient, namely realistic, non-realistic cartoon-shaded and charcoal-sketch like conditions. Emotional impact of the rendering conditions was measured via a combination of subjective and objective metrics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The effectiveness of visual realism of virtual characters in engaging users and eliciting affective responses has been an open question. We empirically evaluated the effects of realistic vs. non-realistic rendering of virtual humans on the emotional response of participants in a medical virtual reality system that was designed to educate users to recognize the signs and symptoms of patient deterioration. In a between-subjects experiment protocol, participants interacted with one of three different appearances of a virtual patient, namely realistic, non-realistic cartoon-shaded and charcoal-sketch like conditions. Emotional impact of the rendering conditions was measured via a combination of subjective and objective metrics.",
"fno": "07223346",
"keywords": [
"Rendering Computer Graphics",
"Solid Modeling",
"Visualization",
"Virtual Reality",
"Biomedical Imaging",
"Atmospheric Measurements",
"Particle Measurements",
"Rendering",
"Virtual Digital Characters",
"Psychology",
"User Studies"
],
"authors": [
{
"affiliation": "Clemson University",
"fullName": "Himanshu Chaturvedi",
"givenName": "Himanshu",
"surname": "Chaturvedi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Nathan D. Newsome",
"givenName": "Nathan D.",
"surname": "Newsome",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Sabarish V. Babu",
"givenName": "Sabarish V.",
"surname": "Babu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "163-164",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223345",
"articleId": "12OmNC943Fw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223347",
"articleId": "12OmNqNoscN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2013/5048/0/5048a300",
"title": "How Virtual Teammate Support Types Affect Stress",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a300/12OmNwBBqcc",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2016/2722/0/07590370",
"title": "The Subjective Well-Being via Virtual Worlds Experience",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2016/07590370/12OmNxQOjHr",
"parentPublication": {
"id": "proceedings/vs-games/2016/2722/0",
"title": "2016 8th International Conference on Games and Virtual Worlds for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404626",
"title": "Effects of Virtual Human Animation on Emotion Contagion in Simulated Inter-Personal Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404626/13rRUxASubA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07383334",
"title": "Effects of Virtual Human Appearance Fidelity on Emotion Contagion in Affective Inter-Personal Simulations",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07383334/13rRUygBw7c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493419",
"title": "Effects of Graphical Styles on Emotional States for VR-Supported Psychotherapy",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493419/14tNJnLIk5b",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/03/08642402",
"title": "First Impressions Count! The Role of the Human's Emotional State on Rapport Established with an Empathic versus Neutral Virtual Therapist",
"doi": null,
"abstractUrl": "/journal/ta/2021/03/08642402/17PYEmawc80",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a193",
"title": "Comparing Meditation and Immersive Virtual Environment for Relaxation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a193/1KmFfgROQxO",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vhcie/2017/2758/0/07935624",
"title": "Evaluating collision avoidance effects on discomfort in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vhcie/2017/07935624/1h0Lhmayehq",
"parentPublication": {
"id": "proceedings/vhcie/2017/2758/0",
"title": "2017 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a209",
"title": "A Virtual Reality Framework for Human-Virtual Crowd Interaction Studies",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a209/1qpzBFKHFpC",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a437",
"title": "Focus Group on Social Virtual Reality in Social Virtual Reality: Effects on Emotion and Self-Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a437/1yeQD8KNChO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvRU0cM",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwJybU7",
"doi": "10.1109/ISMAR-Adjunct.2017.45",
"title": "[POSTER] Believable Virtual Characters for Mixed Reality",
"normalizedTitle": "[POSTER] Believable Virtual Characters for Mixed Reality",
"abstract": "This poster discusses the implementation and technical choices of a proof of concept experience demonstrating interactive virtual characters for immersive mixed reality (MR) applications. Our interactive virtual characters are made believable through their visual appearance, physical interaction with the environment, dynamic behavior to the user and both real and virtual stimuli, and by being able to affect actual change to the real world (using Internet of Things devices). We also propose that the inclusion of believable virtual characters can reinforce the overall plausibility of a MR scenario.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This poster discusses the implementation and technical choices of a proof of concept experience demonstrating interactive virtual characters for immersive mixed reality (MR) applications. Our interactive virtual characters are made believable through their visual appearance, physical interaction with the environment, dynamic behavior to the user and both real and virtual stimuli, and by being able to affect actual change to the real world (using Internet of Things devices). We also propose that the inclusion of believable virtual characters can reinforce the overall plausibility of a MR scenario.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This poster discusses the implementation and technical choices of a proof of concept experience demonstrating interactive virtual characters for immersive mixed reality (MR) applications. Our interactive virtual characters are made believable through their visual appearance, physical interaction with the environment, dynamic behavior to the user and both real and virtual stimuli, and by being able to affect actual change to the real world (using Internet of Things devices). We also propose that the inclusion of believable virtual characters can reinforce the overall plausibility of a MR scenario.",
"fno": "6327a121",
"keywords": [
"Virtual Reality",
"Three Dimensional Displays",
"Navigation",
"Solid Modeling",
"Cameras",
"Robot Kinematics",
"Mixed Reality",
"Virtual Characters",
"Video Games",
"Agents"
],
"authors": [
{
"affiliation": null,
"fullName": "Jorge Arroyo-Palacios",
"givenName": "Jorge",
"surname": "Arroyo-Palacios",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Richard Marks",
"givenName": "Richard",
"surname": "Marks",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "121-123",
"year": "2017",
"issn": null,
"isbn": "978-0-7695-6327-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6327a115",
"articleId": "12OmNzcxZ6D",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6327a124",
"articleId": "12OmNqC2uYn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948451",
"title": "[Poster] Turbidity-based aerial perspective rendering for mixed reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948451/12OmNqyDjpg",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a155",
"title": "[POSTER] MR TV Mozaik: A New Mixed Reality Interactive TV Experience",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a155/12OmNz3bdL7",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07829404",
"title": "MR360: Mixed Reality Rendering for 360° Panoramic Videos",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07829404/13rRUwhHcQW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2006/2671/0/04030822",
"title": "Integrating a Real-Time Captured Object into Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2006/04030822/14dcEdLb3x5",
"parentPublication": {
"id": "proceedings/cw/2006/2671/0",
"title": "2006 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a475",
"title": "Design and Implementation of Virtual-Real Interactive System for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a475/19wB3F7hACQ",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icict/2022/6960/0/696000a153",
"title": "Mixed reality (MR) Enabled Proprio and Teleoperation of a Humanoid Robot for Paraplegic Patients",
"doi": null,
"abstractUrl": "/proceedings-article/icict/2022/696000a153/1FJ5bdmciJO",
"parentPublication": {
"id": "proceedings/icict/2022/6960/0",
"title": "2022 5th International Conference on Information and Computer Technologies (ICICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a865",
"title": "Learning and Teaching Fluid Dynamics using Augmented and Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a865/1J7Wr5spc76",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a657",
"title": "Mixed Reality for Engineering Design Review Using Finite Element Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a657/1J7WwCL6CCQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798180",
"title": "Architectural Design in Virtual Reality and Mixed Reality Environments: A Comparative Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798180/1cJ1bDktgoU",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090675",
"title": "Developing Embodied Interactive Virtual Characters for Human-Subjects Studies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090675/1jIxqoL3kbK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxmuXW5Es",
"doi": "10.1109/VRW50115.2020.00226",
"title": "Perception of Head Motion Effect on Emotional Facial Expression in Virtual Reality",
"normalizedTitle": "Perception of Head Motion Effect on Emotional Facial Expression in Virtual Reality",
"abstract": "In this paper we present a study investigating the impact of head motion on realism, its effect on perceived emotional intensity, and how it affects the affinity for facial expressions. The purpose of this work is to enhance the realism of interactive virtual characters. We designed an experiment to measure the impact through a combination of methods. This included subject behavioural data rating designed facial animations in Virtual Reality (VR) and questionnaire ratings. The results showed that head motions had a positive impact on facial expressions, they enhance realism, perceived emotional intensity, and affinity for virtual characters.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present a study investigating the impact of head motion on realism, its effect on perceived emotional intensity, and how it affects the affinity for facial expressions. The purpose of this work is to enhance the realism of interactive virtual characters. We designed an experiment to measure the impact through a combination of methods. This included subject behavioural data rating designed facial animations in Virtual Reality (VR) and questionnaire ratings. The results showed that head motions had a positive impact on facial expressions, they enhance realism, perceived emotional intensity, and affinity for virtual characters.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present a study investigating the impact of head motion on realism, its effect on perceived emotional intensity, and how it affects the affinity for facial expressions. The purpose of this work is to enhance the realism of interactive virtual characters. We designed an experiment to measure the impact through a combination of methods. This included subject behavioural data rating designed facial animations in Virtual Reality (VR) and questionnaire ratings. The results showed that head motions had a positive impact on facial expressions, they enhance realism, perceived emotional intensity, and affinity for virtual characters.",
"fno": "09090662",
"keywords": [
"Videos",
"Facial Animation",
"Three Dimensional Displays",
"Virtual Reality",
"Resists",
"Dynamics",
"Realism",
"Head Motion",
"Facial Expression",
"Emotion",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Portsmouth",
"fullName": "Qiongdan Cao",
"givenName": "Qiongdan",
"surname": "Cao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Portsmouth",
"fullName": "Hui Yu",
"givenName": "Hui",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Emteq Ltd, Brighton",
"fullName": "Charles Nduka",
"givenName": "Charles",
"surname": "Nduka",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "750-751",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090498",
"articleId": "1jIxogcnA3K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090639",
"articleId": "1jIxvN0ibpS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344583",
"title": "Decoupling facial expressions and head motions in complex emotions",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344583/12OmNB9t6qd",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349579",
"title": "Pleasure-arousal-dominance driven facial expression simulation",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349579/12OmNBLdKEh",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/2/01048355",
"title": "Mapping emotional status to facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/01048355/12OmNBW0vFt",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/devlrn/2005/9226/0/01490973",
"title": "Emotional elicitation by dynamic facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/devlrn/2005/01490973/12OmNBt3qpZ",
"parentPublication": {
"id": "proceedings/devlrn/2005/9226/0",
"title": "International Conference on Development and Learning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2006/2606/0/26060428",
"title": "Facial Animation Using Emotional Model",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2006/26060428/12OmNwCaCrJ",
"parentPublication": {
"id": "proceedings/cgiv/2006/2606/0",
"title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2013/5005/0/5005a444",
"title": "Impressive Scene Detection from Lifelog Videos by Unsupervised Facial Expression Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2013/5005a444/12OmNx57HO5",
"parentPublication": {
"id": "proceedings/snpd/2013/5005/0",
"title": "2013 14th ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2017/5812/0/08056588",
"title": "When facial expressions dominate emotion perception in groups of virtual characters",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056588/12OmNz61d7s",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349549",
"title": "Perception of emotional expressions in different representations using facial feature points",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349549/12OmNzUgdes",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/viz/2009/3734/0/3734a061",
"title": "Considerations for Believable Emotional Facial Expression Animation",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a061/12OmNzZmZrJ",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049691",
"title": "Emotional Voice Puppetry",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049691/1KYouSCDkQM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAUKpERUc",
"doi": "10.1109/VR50410.2021.00024",
"title": "Toward Understanding the Effects of Virtual Character Appearance on Avoidance Movement Behavior",
"normalizedTitle": "Toward Understanding the Effects of Virtual Character Appearance on Avoidance Movement Behavior",
"abstract": "This virtual reality study was conducted to assess the impact of the appearance of virtual characters on the avoidance movement behavior of participants. Five experimental conditions were examined. Under each condition, one of the five different virtual characters (classified as mannequin, human, cartoon, robot, and zombie) was studied. Each participant had to experience only one condition and was asked to perform the collision avoidance tasks two times. During the walking task, the motion of participants was recorded. After finishing the collision avoidance segment of the study, a questionnaire that examined different concepts (emotional reactivity, emotional contagion, attentional allocation, behavioral independence, perceived skill, presence, immersion, virtual character realism, and virtual character unpleasantness) was distributed to the participants. Based on the collected measurements (avoidance movement behavior and self-reported ratings), we tried to understand the effects of the appearance of a virtual character on the avoidance movement behavior, and its possible correlation to subjective ratings. The results obtained from this study indicated that the appearance of the virtual characters did affect the avoidance movement behavior and also some of the examined concepts. Additionally, participant avoidance movement behavior correlates with some subjective ratings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This virtual reality study was conducted to assess the impact of the appearance of virtual characters on the avoidance movement behavior of participants. Five experimental conditions were examined. Under each condition, one of the five different virtual characters (classified as mannequin, human, cartoon, robot, and zombie) was studied. Each participant had to experience only one condition and was asked to perform the collision avoidance tasks two times. During the walking task, the motion of participants was recorded. After finishing the collision avoidance segment of the study, a questionnaire that examined different concepts (emotional reactivity, emotional contagion, attentional allocation, behavioral independence, perceived skill, presence, immersion, virtual character realism, and virtual character unpleasantness) was distributed to the participants. Based on the collected measurements (avoidance movement behavior and self-reported ratings), we tried to understand the effects of the appearance of a virtual character on the avoidance movement behavior, and its possible correlation to subjective ratings. The results obtained from this study indicated that the appearance of the virtual characters did affect the avoidance movement behavior and also some of the examined concepts. Additionally, participant avoidance movement behavior correlates with some subjective ratings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This virtual reality study was conducted to assess the impact of the appearance of virtual characters on the avoidance movement behavior of participants. Five experimental conditions were examined. Under each condition, one of the five different virtual characters (classified as mannequin, human, cartoon, robot, and zombie) was studied. Each participant had to experience only one condition and was asked to perform the collision avoidance tasks two times. During the walking task, the motion of participants was recorded. After finishing the collision avoidance segment of the study, a questionnaire that examined different concepts (emotional reactivity, emotional contagion, attentional allocation, behavioral independence, perceived skill, presence, immersion, virtual character realism, and virtual character unpleasantness) was distributed to the participants. Based on the collected measurements (avoidance movement behavior and self-reported ratings), we tried to understand the effects of the appearance of a virtual character on the avoidance movement behavior, and its possible correlation to subjective ratings. The results obtained from this study indicated that the appearance of the virtual characters did affect the avoidance movement behavior and also some of the examined concepts. Additionally, participant avoidance movement behavior correlates with some subjective ratings.",
"fno": "255600a040",
"keywords": [
"Collision Avoidance",
"Human Factors",
"Virtual Reality",
"Virtual Character Unpleasantness",
"Participant Avoidance Movement Behavior",
"Virtual Character Appearance",
"Virtual Reality",
"Virtual Characters",
"Collision Avoidance",
"Collision Avoidance Segment",
"Virtual Character Realism",
"Emotional Reactivity",
"Emotional Contagion",
"Attentional Allocation",
"Behavioral Independence",
"Perceived Skill",
"Three Dimensional Displays",
"Motion Segmentation",
"Virtual Reality",
"User Interfaces",
"Particle Measurements",
"Resource Management",
"Motion Measurement",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Purdue University,Indiana,West Lafayette,U.S.A.",
"fullName": "Christos Mousas",
"givenName": "Christos",
"surname": "Mousas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of the Aegean,Mytilene,Greece",
"fullName": "Alexandros Koilias",
"givenName": "Alexandros",
"surname": "Koilias",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Southern Illinois University,Carbondale,Illinois,U.S.A.",
"fullName": "Banafsheh Rekabdar",
"givenName": "Banafsheh",
"surname": "Rekabdar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University,Indiana,West Lafayette,U.S.A.",
"fullName": "Dominic Kao",
"givenName": "Dominic",
"surname": "Kao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Southern Illinois University,Carbondale,Illinois,U.S.A.",
"fullName": "Dimitris Anastasiou",
"givenName": "Dimitris",
"surname": "Anastasiou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "40-49",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600a031",
"articleId": "1tuArQOYe9q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a050",
"articleId": "1tuAqY26wzm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550191",
"title": "Is the user trained? Assessing performance and cognitive resource demands in the Virtusphere",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550191/12OmNApLGAG",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itag/2014/6795/0/6795a013",
"title": "How Body Movement Influences Virtual Reality Analgesia?",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2014/6795a013/12OmNC4wtBe",
"parentPublication": {
"id": "proceedings/itag/2014/6795/0",
"title": "2014 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a207",
"title": "Investigation on the Correlation between Eye Movement and Reaction Time under Mental Fatigue Influence",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a207/17D45WHONlB",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a172",
"title": "Predicting Reading Performance based on Eye Movement Analysis with Hidden Markov Models",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a172/1FUUgXB4TCw",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a594",
"title": "Effects of Rendering Styles of a Virtual Character on Avoidance Movement Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a594/1J7WjkWVarS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798043",
"title": "Effects of Self-Avatar and Gaze on Avoidance Movement Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798043/1cJ0Wx6hvhK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vhcie/2017/2758/0/07935624",
"title": "Evaluating collision avoidance effects on discomfort in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vhcie/2017/07935624/1h0Lhmayehq",
"parentPublication": {
"id": "proceedings/vhcie/2017/2758/0",
"title": "2017 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089452",
"title": "Real and Virtual Environment Mismatching Induces Arousal and Alters Movement Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089452/1jIxcobDHi0",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09206143",
"title": "Spatial Presence, Performance, and Behavior between Real, Remote, and Virtual Immersive Environments",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09206143/1npxM6fDN7i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icscde/2021/0142/0/014200a215",
"title": "Obstacle avoidance path optimization method of multi-legged robot based on virtual reality technology",
"doi": null,
"abstractUrl": "/proceedings-article/icscde/2021/014200a215/1xtSA2H7Iti",
"parentPublication": {
"id": "proceedings/icscde/2021/0142/0",
"title": "2021 International Conference of Social Computing and Digital Economy (ICSCDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQWUAFeq4",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00106",
"title": "Doctoral Consortium: Human aspects of virtual characters",
"normalizedTitle": "Doctoral Consortium: Human aspects of virtual characters",
"abstract": "On the one hand, virtual characters may be similar to people in terms of appearance and behavior, but on the other hand, they cannot be considered fully \"human\". The main goal of this project is to answer the question about the boundaries of the analogy between interactions with real people and interactions with virtual characters. To investigate this, I plan three lines of research covering performance-based as well as economic and moral decision-making tasks. The tasks will differ in terms of the explicitness of the correct answer (from an easy to define the appropriate solution in performance tasks to the inability to choose the only correct option in moral dilemmas) and complexity of the social situation. The main outcome of the experiments will be the answer to the question of how \"humanly\" are virtual characters treated. I expect that if virtual human are treated like real people, I will observe similar behavioral patterns as when people have real social interactions. Additionally, I plan to systematically manipulate the external characteristics of a virtual character to find the determinants of its influence on participant’s behavior. Specifically, I will attempt to achieve the level of realism appropriate to induce a sense of copresence and consequent influence of virtual characters on people’s behavior. The entire project will therefore answer a broader question about the possibility, scope and nature of the influence of virtual characters on human behavior.",
"abstracts": [
{
"abstractType": "Regular",
"content": "On the one hand, virtual characters may be similar to people in terms of appearance and behavior, but on the other hand, they cannot be considered fully \"human\". The main goal of this project is to answer the question about the boundaries of the analogy between interactions with real people and interactions with virtual characters. To investigate this, I plan three lines of research covering performance-based as well as economic and moral decision-making tasks. The tasks will differ in terms of the explicitness of the correct answer (from an easy to define the appropriate solution in performance tasks to the inability to choose the only correct option in moral dilemmas) and complexity of the social situation. The main outcome of the experiments will be the answer to the question of how \"humanly\" are virtual characters treated. I expect that if virtual human are treated like real people, I will observe similar behavioral patterns as when people have real social interactions. Additionally, I plan to systematically manipulate the external characteristics of a virtual character to find the determinants of its influence on participant’s behavior. Specifically, I will attempt to achieve the level of realism appropriate to induce a sense of copresence and consequent influence of virtual characters on people’s behavior. The entire project will therefore answer a broader question about the possibility, scope and nature of the influence of virtual characters on human behavior.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "On the one hand, virtual characters may be similar to people in terms of appearance and behavior, but on the other hand, they cannot be considered fully \"human\". The main goal of this project is to answer the question about the boundaries of the analogy between interactions with real people and interactions with virtual characters. To investigate this, I plan three lines of research covering performance-based as well as economic and moral decision-making tasks. The tasks will differ in terms of the explicitness of the correct answer (from an easy to define the appropriate solution in performance tasks to the inability to choose the only correct option in moral dilemmas) and complexity of the social situation. The main outcome of the experiments will be the answer to the question of how \"humanly\" are virtual characters treated. I expect that if virtual human are treated like real people, I will observe similar behavioral patterns as when people have real social interactions. Additionally, I plan to systematically manipulate the external characteristics of a virtual character to find the determinants of its influence on participant’s behavior. Specifically, I will attempt to achieve the level of realism appropriate to induce a sense of copresence and consequent influence of virtual characters on people’s behavior. The entire project will therefore answer a broader question about the possibility, scope and nature of the influence of virtual characters on human behavior.",
"fno": "129800a457",
"keywords": [
"Decision Making",
"Human Factors",
"Social Aspects Of Automation",
"Virtual Reality",
"Virtual Human",
"Virtual Character",
"Doctoral Consortium",
"Human Aspects",
"Moral Dilemmas",
"Social Situation",
"Social Interactions",
"People Behavior",
"Economics",
"Ethics",
"Decision Making",
"Medical Services",
"Complexity Theory",
"Task Analysis",
"Augmented Reality",
"Virtual Human",
"VR",
"Virtual Reality",
"Agent"
],
"authors": [
{
"affiliation": "Jagiellonian University,Emotion and Perception Lab, Institute of Psychology,Krakow",
"fullName": "Radosław Sterna",
"givenName": "Radosław",
"surname": "Sterna",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "457-458",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129800a453",
"articleId": "1yfxJ6xhCww",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a459",
"articleId": "1yeQDpgn9ks",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2016/9041/0/9041a312",
"title": "The Impact of a Mobile Augmented Reality Game: Changing Students' Perceptions of the Complexity of Socioscientific Reasoning",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2016/9041a312/12OmNAkniUU",
"parentPublication": {
"id": "proceedings/icalt/2016/9041/0",
"title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892226",
"title": "Asking ethical questions in research using immersive virtual and augmented reality technologies with children and youth",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892226/12OmNrIrPjW",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349428",
"title": "Simulation of the dynamics of virtual characters' emotions and social relations",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349428/12OmNvkpl7I",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802088",
"title": "Simulating crowd interactions in virtual environments (doctoral consortium)",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802088/12OmNwEJ0WC",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a121",
"title": "[POSTER] Believable Virtual Characters for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a121/12OmNwJybU7",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2008/3095/0/3095a840",
"title": "A Networked Virtual Environment for Teaching Handwritten Characters Through Haptization of Human Motor Skills",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2008/3095a840/12OmNyL0Tx4",
"parentPublication": {
"id": "proceedings/aina/2008/3095/0",
"title": "22nd International Conference on Advanced Information Networking and Applications (aina 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040014",
"title": "Animation of Natural Virtual Characters",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040014/13rRUwwslwD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a917",
"title": "NUX Characters - interaction with voice assistants in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a917/1J7W9AhUVt6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090675",
"title": "Developing Embodied Interactive Virtual Characters for Human-Subjects Studies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090675/1jIxqoL3kbK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/01/09461651",
"title": "Is the Perceived Comfort With CG Characters Increasing With Their Novelty?",
"doi": null,
"abstractUrl": "/magazine/cg/2022/01/09461651/1uCdWoUaHpm",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx7ouUM",
"title": "2013 International Conference on Computational and Information Sciences",
"acronym": "iccis",
"groupId": "1800262",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNASraV8",
"doi": "10.1109/ICCIS.2013.82",
"title": "3D Model Semantic Automatic Annotation Based on X3D Scene",
"normalizedTitle": "3D Model Semantic Automatic Annotation Based on X3D Scene",
"abstract": "Based on the semantic similarity between models in the same 3D scene, a 3D model semantic automatic annotation algorithm is proposed. This algorithm abstracts a scene into a scene-tree, uses ontology for semantic reasoning between scene and model, and annotates model files automatically. First, model domain ontology for scene is made. Then the scene-tree is traversed twice, and the 3D models annotation is achieved by semantic reasoning among nodes. The experiments show the feasibility of this method, which lays a foundation for semantic retrieval and rapid modeling of 3D scene.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Based on the semantic similarity between models in the same 3D scene, a 3D model semantic automatic annotation algorithm is proposed. This algorithm abstracts a scene into a scene-tree, uses ontology for semantic reasoning between scene and model, and annotates model files automatically. First, model domain ontology for scene is made. Then the scene-tree is traversed twice, and the 3D models annotation is achieved by semantic reasoning among nodes. The experiments show the feasibility of this method, which lays a foundation for semantic retrieval and rapid modeling of 3D scene.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Based on the semantic similarity between models in the same 3D scene, a 3D model semantic automatic annotation algorithm is proposed. This algorithm abstracts a scene into a scene-tree, uses ontology for semantic reasoning between scene and model, and annotates model files automatically. First, model domain ontology for scene is made. Then the scene-tree is traversed twice, and the 3D models annotation is achieved by semantic reasoning among nodes. The experiments show the feasibility of this method, which lays a foundation for semantic retrieval and rapid modeling of 3D scene.",
"fno": "5004a282",
"keywords": [
"Semantics",
"Three Dimensional Displays",
"Solid Modeling",
"Ontologies",
"Cognition",
"Computational Modeling",
"Educational Institutions",
"X 3 D",
"Semantic Annotation",
"3 D Model",
"Scene",
"Ontology"
],
"authors": [
{
"affiliation": null,
"fullName": "Jun Liu",
"givenName": "Jun",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wenzhen Su",
"givenName": "Wenzhen",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu Sun",
"givenName": "Yu",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "282-285",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5004-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5004a280",
"articleId": "12OmNyKJiyF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5004a286",
"articleId": "12OmNxR5UFT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sitis/2015/9721/0/9721a306",
"title": "Towards a Scene-Based Video Annotation Framework",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2015/9721a306/12OmNC4eSH2",
"parentPublication": {
"id": "proceedings/sitis/2015/9721/0",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kse/2012/4760/0/4760a099",
"title": "Towards Efficient Sport Data Integration through Semantic Annotation",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2012/4760a099/12OmNvRU0g3",
"parentPublication": {
"id": "proceedings/kse/2012/4760/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d688",
"title": "Semantic Instance Annotation of Street Scenes by 3D to 2D Label Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d688/12OmNy2ah1y",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2013/3261/0/3261a197",
"title": "Research of Data Provenance Semantic Annotation for Dependency Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2013/3261a197/12OmNyYDDyg",
"parentPublication": {
"id": "proceedings/cbd/2013/3261/0",
"title": "2013 International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/skg/2007/3007/0/30070170",
"title": "OLYVIA: Ontology-based Automatic Video Annotation and Summarization System Using Semantic Inference Rules",
"doi": null,
"abstractUrl": "/proceedings-article/skg/2007/30070170/12OmNz6iOay",
"parentPublication": {
"id": "proceedings/skg/2007/3007/0",
"title": "Semantics, Knowledge and Grid, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a022",
"title": "Semantic Annotation of Patient-Specific 3D Anatomical Models",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a022/12OmNzV70v0",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a190",
"title": "Semantic Scene Completion from a Single Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a190/12OmNzn38Ky",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600d981",
"title": "MonoScene: Monocular 3D Semantic Scene Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600d981/1H0KHmQGhEs",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a416",
"title": "Two Stream 3D Semantic Scene Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a416/1iTvpmQNrbO",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2020/4272/0/427200a085",
"title": "Semantic Tree-Based 3D Scene Model Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2020/427200a085/1mAa0fUKu1q",
"parentPublication": {
"id": "proceedings/mipr/2020/4272/0",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvRU0cK",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxy4N6P",
"doi": "10.1109/ISMAR.2016.21",
"title": "PPV: Pixel-Point-Volume Segmentation for Object Referencing in Collaborative Augmented Reality",
"normalizedTitle": "PPV: Pixel-Point-Volume Segmentation for Object Referencing in Collaborative Augmented Reality",
"abstract": "We present a method for collaborative augmented reality (AR) that enables users from different viewpoints to interpret object references specified via 2D on-screen circling gestures. Based on a user's 2D drawing annotation, the method segments out the userselected object using an incomplete or imperfect scene model and the color image from the drawing viewpoint. Specifically, we propose a novel segmentation algorithm that utilizes both 2D and 3D scene cues, structured into a three-layer graph of pixels, 3D points, and volumes (supervoxels), solved via standard graph cut algorithms. This segmentation enables an appropriate rendering of the user's 2D annotation from other viewpoints in 3D augmented reality. Results demonstrate the superiority of the proposed method over existing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method for collaborative augmented reality (AR) that enables users from different viewpoints to interpret object references specified via 2D on-screen circling gestures. Based on a user's 2D drawing annotation, the method segments out the userselected object using an incomplete or imperfect scene model and the color image from the drawing viewpoint. Specifically, we propose a novel segmentation algorithm that utilizes both 2D and 3D scene cues, structured into a three-layer graph of pixels, 3D points, and volumes (supervoxels), solved via standard graph cut algorithms. This segmentation enables an appropriate rendering of the user's 2D annotation from other viewpoints in 3D augmented reality. Results demonstrate the superiority of the proposed method over existing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method for collaborative augmented reality (AR) that enables users from different viewpoints to interpret object references specified via 2D on-screen circling gestures. Based on a user's 2D drawing annotation, the method segments out the userselected object using an incomplete or imperfect scene model and the color image from the drawing viewpoint. Specifically, we propose a novel segmentation algorithm that utilizes both 2D and 3D scene cues, structured into a three-layer graph of pixels, 3D points, and volumes (supervoxels), solved via standard graph cut algorithms. This segmentation enables an appropriate rendering of the user's 2D annotation from other viewpoints in 3D augmented reality. Results demonstrate the superiority of the proposed method over existing methods.",
"fno": "3641a077",
"keywords": [
"Three Dimensional Displays",
"Two Dimensional Displays",
"Solid Modeling",
"Image Segmentation",
"Collaboration",
"Image Reconstruction",
"Augmented Reality",
"And Virtual Realities",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial",
"Augmented"
],
"authors": [
{
"affiliation": null,
"fullName": "Kuo-Chin Lien",
"givenName": "Kuo-Chin",
"surname": "Lien",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Benjamin Nuernberger",
"givenName": "Benjamin",
"surname": "Nuernberger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tobias Höllerer",
"givenName": "Tobias",
"surname": "Höllerer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Matthew Turk",
"givenName": "Matthew",
"surname": "Turk",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "77-83",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3641-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3641a071",
"articleId": "12OmNBrV1TN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3641a084",
"articleId": "12OmNCfSqMX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a402",
"title": "3D Shape Induction from 2D Views of Multiple Objects",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a402/12OmNBLdKOR",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460046",
"title": "Interpreting 2D gesture annotations in 3D augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460046/12OmNBSSVcS",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892323",
"title": "Texturing of augmented reality character based on colored drawing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892323/12OmNCcKQu9",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550237",
"title": "Poster: 3D referencing for remote task assistance in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550237/12OmNqC2uWf",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892383",
"title": "Gesture-based augmented reality annotation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892383/12OmNwJPMYX",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/08/08405559",
"title": "Viewpoint Assessment and Recommendation for Photographing Architectures",
"doi": null,
"abstractUrl": "/journal/tg/2019/08/08405559/13rRUxBa56d",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699222",
"title": "TutAR: Semi-Automatic Generation of Augmented Reality Tutorials for Medical Education",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699222/19F1PQOMxWg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a136",
"title": "A Two-Point Map-Based Interface for Architectural Walkthrough",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a136/1gyslJSI35u",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090401",
"title": "Learning to Match 2D Images and 3D LiDAR Point Clouds for Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090401/1jIxmhXvH7a",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2020/6768/0/676800b267",
"title": "Towards Immersive Comprehension of Software Systems Using Augmented Reality - An Empirical Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2020/676800b267/1pP3IvL3Z6w",
"parentPublication": {
"id": "proceedings/ase/2020/6768/0",
"title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqH9hnp",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy2ah1y",
"doi": "10.1109/CVPR.2016.401",
"title": "Semantic Instance Annotation of Street Scenes by 3D to 2D Label Transfer",
"normalizedTitle": "Semantic Instance Annotation of Street Scenes by 3D to 2D Label Transfer",
"abstract": "Semantic annotations are vital for training models for object recognition, semantic segmentation or scene understanding. Unfortunately, pixelwise annotation of images at very large scale is labor-intensive and only little labeled data is available, particularly at instance level and for street scenes. In this paper, we propose to tackle this problem by lifting the semantic instance labeling task from 2D into 3D. Given reconstructions from stereo or laser data, we annotate static 3D scene elements with rough bounding primitives and develop a model which transfers this information into the image domain. We leverage our method to obtain 2D labels for a novel suburban video dataset which we have collected, resulting in 400k semantic and instance image annotations. A comparison of our method to state-of the-art label transfer baselines reveals that 3D information enables more efficient annotation while at the same time resulting in improved accuracy and time-coherent labels.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Semantic annotations are vital for training models for object recognition, semantic segmentation or scene understanding. Unfortunately, pixelwise annotation of images at very large scale is labor-intensive and only little labeled data is available, particularly at instance level and for street scenes. In this paper, we propose to tackle this problem by lifting the semantic instance labeling task from 2D into 3D. Given reconstructions from stereo or laser data, we annotate static 3D scene elements with rough bounding primitives and develop a model which transfers this information into the image domain. We leverage our method to obtain 2D labels for a novel suburban video dataset which we have collected, resulting in 400k semantic and instance image annotations. A comparison of our method to state-of the-art label transfer baselines reveals that 3D information enables more efficient annotation while at the same time resulting in improved accuracy and time-coherent labels.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Semantic annotations are vital for training models for object recognition, semantic segmentation or scene understanding. Unfortunately, pixelwise annotation of images at very large scale is labor-intensive and only little labeled data is available, particularly at instance level and for street scenes. In this paper, we propose to tackle this problem by lifting the semantic instance labeling task from 2D into 3D. Given reconstructions from stereo or laser data, we annotate static 3D scene elements with rough bounding primitives and develop a model which transfers this information into the image domain. We leverage our method to obtain 2D labels for a novel suburban video dataset which we have collected, resulting in 400k semantic and instance image annotations. A comparison of our method to state-of the-art label transfer baselines reveals that 3D information enables more efficient annotation while at the same time resulting in improved accuracy and time-coherent labels.",
"fno": "8851d688",
"keywords": [
"Three Dimensional Displays",
"Two Dimensional Displays",
"Semantics",
"Solid Modeling",
"Image Segmentation",
"Lasers",
"Cameras"
],
"authors": [
{
"affiliation": null,
"fullName": "Jun Xie",
"givenName": "Jun",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Martin Kiefel",
"givenName": "Martin",
"surname": "Kiefel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ming-Ting Sun",
"givenName": "Ming-Ting",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Andreas Geiger",
"givenName": "Andreas",
"surname": "Geiger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3688-3697",
"year": "2016",
"issn": "1063-6919",
"isbn": "978-1-4673-8851-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8851d678",
"articleId": "12OmNzTppD3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8851d698",
"articleId": "12OmNzuZUwr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccis/2013/5004/0/5004a282",
"title": "3D Model Semantic Automatic Annotation Based on X3D Scene",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a282/12OmNASraV8",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890539",
"title": "SAGTA: Semi-automatic Ground Truth Annotation in crowd scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890539/12OmNBOUxmi",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892383",
"title": "Gesture-based augmented reality annotation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892383/12OmNwJPMYX",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457c432",
"title": "ScanNet: Richly-Annotated 3D Reconstructions of Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c432/12OmNyRg4C5",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08115231",
"title": "A Robust 3D-2D Interactive Tool for Scene Segmentation and Annotation",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08115231/14H4WMh20es",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545378",
"title": "3D Geometry-Aware Semantic Labeling of Outdoor Street Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545378/17D45VtKiwd",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c569",
"title": "SGPN: Similarity Group Proposal Network for 3D Point Cloud Instance Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c569/17D45Vw15u3",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000d559",
"title": "3D-RCNN: Instance-Level 3D Object Reconstruction via Render-and-Compare",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000d559/17D45W9KVIT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0720",
"title": "DArch: Dental Arch Prior-assisted 3D Tooth Instance Segmentation with Weak Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0720/1H1kFKjFl16",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a301",
"title": "Panoptic NeRF: 3D-to-2D Label Transfer for Panoptic Urban Scene Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a301/1KYstcv8HRe",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzV70D6",
"doi": "10.1109/ICPR.2008.4761172",
"title": "An interactive scene annotation tool for video surveillance",
"normalizedTitle": "An interactive scene annotation tool for video surveillance",
"abstract": "An interactive scene annotation tool for video surveillance is presented in this paper. The annotation process is divided into three stages. (1) camera rough calibration;(2) calibration refinement; (3) major surfaces annotation. Inputs are then rendered in a 3D environment, which again help users check calibration accuracy and annotation correctness. Experiments show that this tool is easy to use and attains acceptable annotation accuracy. The interactive procedure helps users without knowledge in computer vision to complete camera calibration as well as surface annotation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An interactive scene annotation tool for video surveillance is presented in this paper. The annotation process is divided into three stages. (1) camera rough calibration;(2) calibration refinement; (3) major surfaces annotation. Inputs are then rendered in a 3D environment, which again help users check calibration accuracy and annotation correctness. Experiments show that this tool is easy to use and attains acceptable annotation accuracy. The interactive procedure helps users without knowledge in computer vision to complete camera calibration as well as surface annotation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An interactive scene annotation tool for video surveillance is presented in this paper. The annotation process is divided into three stages. (1) camera rough calibration;(2) calibration refinement; (3) major surfaces annotation. Inputs are then rendered in a 3D environment, which again help users check calibration accuracy and annotation correctness. Experiments show that this tool is easy to use and attains acceptable annotation accuracy. The interactive procedure helps users without knowledge in computer vision to complete camera calibration as well as surface annotation.",
"fno": "04761172",
"keywords": [
"Calibration",
"Computer Vision",
"Interactive Systems",
"Rendering Computer Graphics",
"Video Surveillance",
"Interactive Scene Annotation Tool",
"Camera Rough Calibration",
"Calibration Refinement",
"Major Surfaces Annotation",
"Computer Vision",
"Camera Calibration",
"Layout",
"Video Surveillance",
"Cameras",
"Calibration",
"Remote Sensing",
"Rough Surfaces",
"Surface Roughness",
"Data Mining",
"Parameter Estimation",
"Statistics"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology, China",
"fullName": "Wenze Hu",
"givenName": "Wenze",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lotus Hill Research Institute, Ezhou, Hubei, China",
"fullName": "Jianting Wen",
"givenName": "Jianting",
"surname": "Wen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lotus Hill Research Institute, Ezhou, Hubei, China",
"fullName": "Haifeng Gong",
"givenName": "Haifeng",
"surname": "Gong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology, China",
"fullName": "Yongtian Wang",
"givenName": "Yongtian",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761171",
"articleId": "12OmNwE9Olz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761173",
"articleId": "12OmNAlvHLQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2007/2786/0/27860473",
"title": "Automatic Annotation of Humans in Surveillance Video",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2007/27860473/12OmNAlvHvf",
"parentPublication": {
"id": "proceedings/crv/2007/2786/0",
"title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239249",
"title": "Realistic 3D reconstruction of the human teeth using shape from shading with shape priors",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239249/12OmNC0guzp",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2012/1204/0/06184205",
"title": "Poster: Design considerations for fabric-based input for surface design",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184205/12OmNqBbHE0",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981777",
"title": "Robust camera calibration tool for video surveillance camera in urban environment",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981777/12OmNqJHFKI",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521581",
"title": "I/sup 2/ A: an interactive image annotation system",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521581/12OmNrIae6V",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270307",
"title": "A Face Annotation Framework with Partial Clustering and Interactive Labeling",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270307/12OmNwDACC1",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2003/03/u3030",
"title": "Interactive Adaptive Movie Annotation",
"doi": null,
"abstractUrl": "/magazine/mu/2003/03/u3030/13rRUwInvPr",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08115231",
"title": "A Robust 3D-2D Interactive Tool for Scene Segmentation and Annotation",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08115231/14H4WMh20es",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093398",
"title": "Toward Interactive Self-Annotation For Video Object Bounding Box: Recurrent Self-Learning And Hierarchical Annotation Based Framework",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093398/1jPbcKKCZvq",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2019/4732/0/09174582",
"title": "4-D Scene Alignment in Surveillance Video",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2019/09174582/1myv8Nc9bkA",
"parentPublication": {
"id": "proceedings/aipr/2019/4732/0",
"title": "2019 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "14qdcP8Ivdv",
"title": "2018 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45W9KVHL",
"doi": "10.1109/3DV.2018.00056",
"title": "Adversarial Semantic Scene Completion from a Single Depth Image",
"normalizedTitle": "Adversarial Semantic Scene Completion from a Single Depth Image",
"abstract": "We propose a method to reconstruct, complete and semantically label a 3D scene from a single input depth image. We improve the accuracy of the regressed semantic 3D maps by a novel architecture based on adversarial learning. In particular, we suggest using multiple adversarial loss terms that not only enforce realistic outputs with respect to the ground truth, but also an effective embedding of the internal features. This is done by correlating the latent features of the encoder working on partial 2.5D data with the latent features extracted from a variational 3D auto-encoder trained to reconstruct the complete semantic scene. In addition, differently from other approaches that operate entirely through 3D convolutions, at test time we retain the original 2.5D structure of the input during downsampling to improve the effectiveness of the internal representation of our model. We test our approach on the main benchmark datasets for semantic scene completion to qualitatively and quantitatively assess the effectiveness of our proposal.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a method to reconstruct, complete and semantically label a 3D scene from a single input depth image. We improve the accuracy of the regressed semantic 3D maps by a novel architecture based on adversarial learning. In particular, we suggest using multiple adversarial loss terms that not only enforce realistic outputs with respect to the ground truth, but also an effective embedding of the internal features. This is done by correlating the latent features of the encoder working on partial 2.5D data with the latent features extracted from a variational 3D auto-encoder trained to reconstruct the complete semantic scene. In addition, differently from other approaches that operate entirely through 3D convolutions, at test time we retain the original 2.5D structure of the input during downsampling to improve the effectiveness of the internal representation of our model. We test our approach on the main benchmark datasets for semantic scene completion to qualitatively and quantitatively assess the effectiveness of our proposal.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a method to reconstruct, complete and semantically label a 3D scene from a single input depth image. We improve the accuracy of the regressed semantic 3D maps by a novel architecture based on adversarial learning. In particular, we suggest using multiple adversarial loss terms that not only enforce realistic outputs with respect to the ground truth, but also an effective embedding of the internal features. This is done by correlating the latent features of the encoder working on partial 2.5D data with the latent features extracted from a variational 3D auto-encoder trained to reconstruct the complete semantic scene. In addition, differently from other approaches that operate entirely through 3D convolutions, at test time we retain the original 2.5D structure of the input during downsampling to improve the effectiveness of the internal representation of our model. We test our approach on the main benchmark datasets for semantic scene completion to qualitatively and quantitatively assess the effectiveness of our proposal.",
"fno": "842500a426",
"keywords": [
"Feature Extraction",
"Image Reconstruction",
"Image Representation",
"Image Sensors",
"Learning Artificial Intelligence",
"Object Detection",
"Adversarial Semantic Scene Completion",
"Single Input Depth Image",
"Regressed Semantic 3 D Maps",
"Adversarial Learning",
"Multiple Adversarial Loss Terms",
"Realistic Outputs",
"Ground Truth",
"Effective Embedding",
"Internal Features",
"Latent Features",
"Auto Encoder",
"Complete Semantic Scene",
"Three Dimensional Displays",
"Semantics",
"Image Reconstruction",
"Feature Extraction",
"Generators",
"Training",
"Two Dimensional Displays",
"Adversarial Training",
"Depth Image",
"Scene Completion",
"Latent Space"
],
"authors": [
{
"affiliation": null,
"fullName": "Yida Wang",
"givenName": "Yida",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "David Joseph Tan",
"givenName": "David Joseph",
"surname": "Tan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Federico Tombari",
"givenName": "Federico",
"surname": "Tombari",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-09-01T00:00:00",
"pubType": "proceedings",
"pages": "426-434",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8425-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "842500a418",
"articleId": "17D45VtKixq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "842500a435",
"articleId": "17D45Xh13tT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457a190",
"title": "Semantic Scene Completion from a Single Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a190/12OmNzn38Ky",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/03/07889053",
"title": "Single-View 3D Scene Reconstruction and Parsing by Attribute Grammar",
"doi": null,
"abstractUrl": "/journal/tp/2018/03/07889053/13rRUwdrdM2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a334",
"title": "Human-Centric Scene Understanding from Single View 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a334/17D45XDIXOk",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5183",
"title": "Indoor Scene Generation from a Collection of Semantic-Segmented Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5183/1BmFbxElKX6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i607",
"title": "ForkNet: Multi-Branch Volumetric Semantic Completion From a Single Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i607/1hQqscNGkTu",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h800",
"title": "Cascaded Context Pyramid for Full-Resolution 3D Semantic Scene Completion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h800/1hVlr89nneE",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a416",
"title": "Two Stream 3D Semantic Scene Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a416/1iTvpmQNrbO",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150931",
"title": "Geometry to the Rescue: 3D Instance Reconstruction from a Cluttered Scene",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150931/1lPHarhatd6",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/7.168E197",
"title": "3D Sketch-Aware Semantic Scene Completion via Semi-Supervised Structure Prior",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/7.168E197/1m3ngObnCda",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a801",
"title": "SCFusion: Real-time Incremental Scene Reconstruction with Semantic Completion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a801/1qyxiNprAo8",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "14qdcP8Ivdv",
"title": "2018 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XDIXOk",
"doi": "10.1109/3DV.2018.00046",
"title": "Human-Centric Scene Understanding from Single View 360 Video",
"normalizedTitle": "Human-Centric Scene Understanding from Single View 360 Video",
"abstract": "In this paper, we propose an approach to indoor scene understanding from observation of people in single view spherical video. As input, our approach takes a centrally located spherical video capture of an indoor scene, estimating the 3D localisation of human actions performed throughout the long term capture. The central contribution of this work is a deep convolutional encoder-decoder network trained on a synthetic dataset to reconstruct regions of affordance from captured human activity. The predicted affordance segmentation is then applied to compose a reconstruction of the complete 3D scene, integrating the affordance segmentation into 3D space. The mapping learnt between human activity and affordance segmentation demonstrates that omnidirectional observation of human activity can be applied to scene understanding tasks such as 3D reconstruction. We show that our approach using only observation of people performs well against previous approaches, allowing reconstruction of occluded regions and labelling of scene affordances.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose an approach to indoor scene understanding from observation of people in single view spherical video. As input, our approach takes a centrally located spherical video capture of an indoor scene, estimating the 3D localisation of human actions performed throughout the long term capture. The central contribution of this work is a deep convolutional encoder-decoder network trained on a synthetic dataset to reconstruct regions of affordance from captured human activity. The predicted affordance segmentation is then applied to compose a reconstruction of the complete 3D scene, integrating the affordance segmentation into 3D space. The mapping learnt between human activity and affordance segmentation demonstrates that omnidirectional observation of human activity can be applied to scene understanding tasks such as 3D reconstruction. We show that our approach using only observation of people performs well against previous approaches, allowing reconstruction of occluded regions and labelling of scene affordances.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose an approach to indoor scene understanding from observation of people in single view spherical video. As input, our approach takes a centrally located spherical video capture of an indoor scene, estimating the 3D localisation of human actions performed throughout the long term capture. The central contribution of this work is a deep convolutional encoder-decoder network trained on a synthetic dataset to reconstruct regions of affordance from captured human activity. The predicted affordance segmentation is then applied to compose a reconstruction of the complete 3D scene, integrating the affordance segmentation into 3D space. The mapping learnt between human activity and affordance segmentation demonstrates that omnidirectional observation of human activity can be applied to scene understanding tasks such as 3D reconstruction. We show that our approach using only observation of people performs well against previous approaches, allowing reconstruction of occluded regions and labelling of scene affordances.",
"fno": "842500a334",
"keywords": [
"Image Colour Analysis",
"Image Motion Analysis",
"Image Reconstruction",
"Image Representation",
"Image Segmentation",
"Image Sequences",
"Video Signal Processing",
"Scene Understanding Tasks",
"Scene Affordances",
"Human Centric Scene",
"Single View 360 Video",
"Indoor Scene Understanding",
"Single View Spherical Video",
"Centrally Located Spherical Video Capture",
"Human Actions",
"Long Term Capture",
"Central Contribution",
"Deep Convolutional Encoder Decoder Network",
"Synthetic Dataset",
"Captured Human Activity",
"Affordance Segmentation",
"Complete 3 D Scene",
"Omnidirectional Observation",
"Three Dimensional Displays",
"Sensors",
"Image Reconstruction",
"Cameras",
"Task Analysis",
"Two Dimensional Displays",
"Semantics",
"Indoor Scene Understanding",
"3 D Reconstruction",
"Affordance Segmentation",
"Human Scene Interaction"
],
"authors": [
{
"affiliation": null,
"fullName": "Sam Fowler",
"givenName": "Sam",
"surname": "Fowler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hansung Kim",
"givenName": "Hansung",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Adrian Hilton",
"givenName": "Adrian",
"surname": "Hilton",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-09-01T00:00:00",
"pubType": "proceedings",
"pages": "334-342",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8425-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "842500a324",
"articleId": "17D45WLdYRt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "842500a343",
"articleId": "17D45XreC7C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tp/2018/03/07889053",
"title": "Single-View 3D Scene Reconstruction and Parsing by Attribute Grammar",
"doi": null,
"abstractUrl": "/journal/tp/2018/03/07889053/13rRUwdrdM2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2017/1235/0/08457938",
"title": "Scene wireframes sketching for UAVs",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2017/08457938/13xI8B3IkY3",
"parentPublication": {
"id": "proceedings/aipr/2017/1235/0",
"title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486576",
"title": "Joint Multi-View People Tracking and Pose Estimation for 3D Scene Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486576/14jQfQj2fTp",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f899",
"title": "Human-Centric Indoor Scene Synthesis Using Stochastic Grammar",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f899/17D45W9KVIW",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a625",
"title": "Novel Single View Constraints for Manhattan 3D Line Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a625/17D45Xi9rXa",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i647",
"title": "Holistic++ Scene Understanding: Single-View 3D Holistic Scene Parsing and Human Pose Estimation With Human-Object Interaction and Physical Commonsense",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i647/1hQqhrIltn2",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0422",
"title": "U4D: Unsupervised 4D Dynamic Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0422/1hQqutyasIU",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c282",
"title": "Resolving 3D Human Pose Ambiguities With 3D Scene Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c282/1hVlg3qjlHq",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c080",
"title": "Silhouette-Assisted 3D Object Instance Reconstruction from a Cluttered Scene",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c080/1i5mva7fXQ4",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150931",
"title": "Geometry to the Rescue: 3D Instance Reconstruction from a Cluttered Scene",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150931/1lPHarhatd6",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hQqhrIltn2",
"doi": "10.1109/ICCV.2019.00874",
"title": "Holistic++ Scene Understanding: Single-View 3D Holistic Scene Parsing and Human Pose Estimation With Human-Object Interaction and Physical Commonsense",
"normalizedTitle": "Holistic++ Scene Understanding: Single-View 3D Holistic Scene Parsing and Human Pose Estimation With Human-Object Interaction and Physical Commonsense",
"abstract": "We propose a new 3D holistic<sup>++</sup> scene understanding problem, which jointly tackles two tasks from a single-view image: (i) holistic scene parsing and reconstruction-3D estimations of object bounding boxes, camera pose, and room layout, and (ii) 3D human pose estimation. The intuition behind is to leverage the coupled nature of these two tasks to improve the granularity and performance of scene understanding. We propose to exploit two critical and essential connections between these two tasks: (i) human-object interaction (HOI) to model the fine-grained relations between agents and objects in the scene, and (ii) physical commonsense to model the physical plausibility of the reconstructed scene. The optimal configuration of the 3D scene, represented by a parse graph, is inferred using Markov chain Monte Carlo (MCMC), which efficiently traverses through the non-differentiable joint solution space. Experimental results demonstrate that the proposed algorithm significantly improves the performance of the two tasks on three datasets, showing an improved generalization ability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new 3D holistic<sup>++</sup> scene understanding problem, which jointly tackles two tasks from a single-view image: (i) holistic scene parsing and reconstruction-3D estimations of object bounding boxes, camera pose, and room layout, and (ii) 3D human pose estimation. The intuition behind is to leverage the coupled nature of these two tasks to improve the granularity and performance of scene understanding. We propose to exploit two critical and essential connections between these two tasks: (i) human-object interaction (HOI) to model the fine-grained relations between agents and objects in the scene, and (ii) physical commonsense to model the physical plausibility of the reconstructed scene. The optimal configuration of the 3D scene, represented by a parse graph, is inferred using Markov chain Monte Carlo (MCMC), which efficiently traverses through the non-differentiable joint solution space. Experimental results demonstrate that the proposed algorithm significantly improves the performance of the two tasks on three datasets, showing an improved generalization ability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new 3D holistic++ scene understanding problem, which jointly tackles two tasks from a single-view image: (i) holistic scene parsing and reconstruction-3D estimations of object bounding boxes, camera pose, and room layout, and (ii) 3D human pose estimation. The intuition behind is to leverage the coupled nature of these two tasks to improve the granularity and performance of scene understanding. We propose to exploit two critical and essential connections between these two tasks: (i) human-object interaction (HOI) to model the fine-grained relations between agents and objects in the scene, and (ii) physical commonsense to model the physical plausibility of the reconstructed scene. The optimal configuration of the 3D scene, represented by a parse graph, is inferred using Markov chain Monte Carlo (MCMC), which efficiently traverses through the non-differentiable joint solution space. Experimental results demonstrate that the proposed algorithm significantly improves the performance of the two tasks on three datasets, showing an improved generalization ability.",
"fno": "480300i647",
"keywords": [
"Cameras",
"Graph Theory",
"Image Reconstruction",
"Inference Mechanisms",
"Markov Processes",
"Monte Carlo Methods",
"Object Detection",
"Pose Estimation",
"Physical Commonsense",
"Physical Plausibility",
"Reconstructed Scene",
"Parse Graph",
"Nondifferentiable Joint Solution Space",
"Single View 3 D",
"Human Pose Estimation",
"Human Object Interaction",
"Scene Understanding Problem",
"Single View Image",
"Holistic Scene Parsing",
"Reconstruction 3 D Estimations",
"Object Bounding Boxes",
"3 D Human",
"Critical Connections",
"Essential Connections",
"Three Dimensional Displays",
"Layout",
"Task Analysis",
"Image Reconstruction",
"Pose Estimation",
"Two Dimensional Displays"
],
"authors": [
{
"affiliation": "UCLA",
"fullName": "Yixin Chen",
"givenName": "Yixin",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UCLA",
"fullName": "Siyuan Huang",
"givenName": "Siyuan",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UCLA",
"fullName": "Tao Yuan",
"givenName": "Tao",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UCLA",
"fullName": "Yixin Zhu",
"givenName": "Yixin",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UCLA",
"fullName": "Siyuan Qi",
"givenName": "Siyuan",
"surname": "Qi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UCLA",
"fullName": "Song-Chun Zhu",
"givenName": "Song-Chun",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "8647-8656",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300i637",
"articleId": "1hVlGmjtYLC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300i657",
"articleId": "1hVlOa4MNUI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457b942",
"title": "Generating Holistic 3D Scene Abstractions for Text-Based Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b942/12OmNvoWV1x",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840b417",
"title": "Holistic Scene Understanding for 3D Object Detection with RGBD Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b417/12OmNwcl7D6",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/03/07889053",
"title": "Single-View 3D Scene Reconstruction and Parsing by Attribute Grammar",
"doi": null,
"abstractUrl": "/journal/tp/2018/03/07889053/13rRUwdrdM2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a334",
"title": "Human-Centric Scene Understanding from Single View 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a334/17D45XDIXOk",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0892",
"title": "Hypersim: A Photorealistic Synthetic Dataset for Holistic Indoor Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0892/1BmJJPy8ySA",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0876",
"title": "HoloPose: Holistic 3D Human Reconstruction In-The-Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0876/1gys6xE6zjG",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c282",
"title": "Resolving 3D Human Pose Ambiguities With 3D Scene Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c282/1hVlg3qjlHq",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c080",
"title": "Silhouette-Assisted 3D Object Instance Reconstruction from a Cluttered Scene",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c080/1i5mva7fXQ4",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a334",
"title": "Holistic 3D Human and Scene Mesh Estimation from Single View Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a334/1yeHKLo6jlK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900o4703",
"title": "Populating 3D Scenes by Learning Human-Scene Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900o4703/1yeHZ17Dp3a",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hVlg3qjlHq",
"doi": "10.1109/ICCV.2019.00237",
"title": "Resolving 3D Human Pose Ambiguities With 3D Scene Constraints",
"normalizedTitle": "Resolving 3D Human Pose Ambiguities With 3D Scene Constraints",
"abstract": "To understand and analyze human behavior, we need to capture humans moving in, and interacting with, the world. Most existing methods perform 3D human pose estimation without explicitly considering the scene. We observe however that the world constrains the body and vice-versa. To motivate this, we show that current 3D human pose estimation methods produce results that are not consistent with the 3D scene. Our key contribution is to exploit static 3D scene structure to better estimate human pose from monocular images. The method enforces Proximal Relationships with Object eXclusion and is called PROX. To test this, we collect a new dataset composed of 12 different 3D scenes and RGB sequences of 20 subjects moving in and interacting with the scenes. We represent human pose using the 3D human body model SMPL-X and extend SMPLify-X to estimate body pose using scene constraints. We make use of the 3D scene information by formulating two main constraints. The inter-penetration constraint penalizes intersection between the body model and the surrounding 3D scene. The contact constraint encourages specific parts of the body to be in contact with scene surfaces if they are close enough in distance and orientation. For quantitative evaluation we capture a separate dataset with 180 RGB frames in which the ground-truth body pose is estimated using a motion capture system. We show quantitatively that introducing scene constraints significantly reduces 3D joint error and vertex error. Our code and data are available for research at https://prox.is.tue.mpg.de.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To understand and analyze human behavior, we need to capture humans moving in, and interacting with, the world. Most existing methods perform 3D human pose estimation without explicitly considering the scene. We observe however that the world constrains the body and vice-versa. To motivate this, we show that current 3D human pose estimation methods produce results that are not consistent with the 3D scene. Our key contribution is to exploit static 3D scene structure to better estimate human pose from monocular images. The method enforces Proximal Relationships with Object eXclusion and is called PROX. To test this, we collect a new dataset composed of 12 different 3D scenes and RGB sequences of 20 subjects moving in and interacting with the scenes. We represent human pose using the 3D human body model SMPL-X and extend SMPLify-X to estimate body pose using scene constraints. We make use of the 3D scene information by formulating two main constraints. The inter-penetration constraint penalizes intersection between the body model and the surrounding 3D scene. The contact constraint encourages specific parts of the body to be in contact with scene surfaces if they are close enough in distance and orientation. For quantitative evaluation we capture a separate dataset with 180 RGB frames in which the ground-truth body pose is estimated using a motion capture system. We show quantitatively that introducing scene constraints significantly reduces 3D joint error and vertex error. Our code and data are available for research at https://prox.is.tue.mpg.de.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To understand and analyze human behavior, we need to capture humans moving in, and interacting with, the world. Most existing methods perform 3D human pose estimation without explicitly considering the scene. We observe however that the world constrains the body and vice-versa. To motivate this, we show that current 3D human pose estimation methods produce results that are not consistent with the 3D scene. Our key contribution is to exploit static 3D scene structure to better estimate human pose from monocular images. The method enforces Proximal Relationships with Object eXclusion and is called PROX. To test this, we collect a new dataset composed of 12 different 3D scenes and RGB sequences of 20 subjects moving in and interacting with the scenes. We represent human pose using the 3D human body model SMPL-X and extend SMPLify-X to estimate body pose using scene constraints. We make use of the 3D scene information by formulating two main constraints. The inter-penetration constraint penalizes intersection between the body model and the surrounding 3D scene. The contact constraint encourages specific parts of the body to be in contact with scene surfaces if they are close enough in distance and orientation. For quantitative evaluation we capture a separate dataset with 180 RGB frames in which the ground-truth body pose is estimated using a motion capture system. We show quantitatively that introducing scene constraints significantly reduces 3D joint error and vertex error. Our code and data are available for research at https://prox.is.tue.mpg.de.",
"fno": "480300c282",
"keywords": [
"Image Motion Analysis",
"Image Reconstruction",
"Image Sequences",
"Pose Estimation",
"3 D Human Pose Ambiguities",
"Scene Constraints",
"Human Behavior",
"Current 3 D Human",
"Estimation Methods",
"Estimate Human",
"3 D Human Body Model SMPL X",
"Scene Surfaces",
"Vertex Error",
"Three Dimensional Displays",
"Solid Modeling",
"Pose Estimation",
"Cameras",
"Image Reconstruction",
"Shape",
"Two Dimensional Displays"
],
"authors": [
{
"affiliation": "Max Planck Institute for Intelligent Systems",
"fullName": "Mohamed Hassan",
"givenName": "Mohamed",
"surname": "Hassan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems",
"fullName": "Vasileios Choutas",
"givenName": "Vasileios",
"surname": "Choutas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems",
"fullName": "Dimitrios Tzionas",
"givenName": "Dimitrios",
"surname": "Tzionas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems",
"fullName": "Michael Black",
"givenName": "Michael",
"surname": "Black",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2282-2292",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300c272",
"articleId": "1hVlzcYSNig",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300c293",
"articleId": "1hQqm60gQVy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f543",
"title": "Recurrent 3D Pose Sequence Machines",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f543/12OmNwCsdJE",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a506",
"title": "Monocular 3D Human Pose Estimation in the Wild Using Improved CNN Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a506/12OmNxdDFF9",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486576",
"title": "Joint Multi-View People Tracking and Pose Estimation for 3D Scene Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486576/14jQfQj2fTp",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a474",
"title": "Rethinking Pose in 3D: Multi-stage Refinement and Recovery for Markerless Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a474/17D45VN31gb",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a120",
"title": "Single-Shot Multi-person 3D Pose Estimation from Monocular RGB",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a120/17D45WaTken",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h307",
"title": "Ordinal Depth Supervision for 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h307/17D45Wda7gh",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i647",
"title": "Holistic++ Scene Understanding: Single-View 3D Holistic Scene Parsing and Human Pose Estimation With Human-Object Interaction and Physical Commonsense",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i647/1hQqhrIltn2",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c272",
"title": "Exploiting Spatial-Temporal Relationships for 3D Pose Estimation via Graph Convolutional Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c272/1hVlzcYSNig",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212996",
"title": "3D Human Pose Estimation with Adversarial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212996/1nHRTVvYYVi",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a334",
"title": "Holistic 3D Human and Scene Mesh Estimation from Single View Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a334/1yeHKLo6jlK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqMPfS8",
"title": "Computer Graphics and Applications, Pacific Conference on",
"acronym": "pg",
"groupId": "1000130",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNApcutB",
"doi": "10.1109/PCCGA.2002.1167892",
"title": "On the Effects of Haptic Display in Brush and Ink Simulation for Chinese Painting and Calligraphy",
"normalizedTitle": "On the Effects of Haptic Display in Brush and Ink Simulation for Chinese Painting and Calligraphy",
"abstract": "In this paper, we develop an interactive haptic system, which can be further aid for digital Chinese painting. When an artist is holding our force feedback device, one feels like holding a real painting brush with all the contact and bending forces, since the viscosity, friction, and the bending force of a brush touching the paper are simulated. First we derive a physical dynamics model as bending springs for bristles to construct a 3D brush. Then we simulate the ink-water transfer system for ink spreading and color blending. Our system is a real-time system and users can interact with it holding a digital brush supported by either a Phantom force feedback device or a \"WACOM pressure sensing pen\" on a tablet. A pilot experiment was conducted, and the results show that brush writing with haptic feedback is better than that of same visual display but without haptic feedback.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we develop an interactive haptic system, which can be further aid for digital Chinese painting. When an artist is holding our force feedback device, one feels like holding a real painting brush with all the contact and bending forces, since the viscosity, friction, and the bending force of a brush touching the paper are simulated. First we derive a physical dynamics model as bending springs for bristles to construct a 3D brush. Then we simulate the ink-water transfer system for ink spreading and color blending. Our system is a real-time system and users can interact with it holding a digital brush supported by either a Phantom force feedback device or a \"WACOM pressure sensing pen\" on a tablet. A pilot experiment was conducted, and the results show that brush writing with haptic feedback is better than that of same visual display but without haptic feedback.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we develop an interactive haptic system, which can be further aid for digital Chinese painting. When an artist is holding our force feedback device, one feels like holding a real painting brush with all the contact and bending forces, since the viscosity, friction, and the bending force of a brush touching the paper are simulated. First we derive a physical dynamics model as bending springs for bristles to construct a 3D brush. Then we simulate the ink-water transfer system for ink spreading and color blending. Our system is a real-time system and users can interact with it holding a digital brush supported by either a Phantom force feedback device or a \"WACOM pressure sensing pen\" on a tablet. A pilot experiment was conducted, and the results show that brush writing with haptic feedback is better than that of same visual display but without haptic feedback.",
"fno": "17840439",
"keywords": [
"Haptics",
"Human Computer Interaction",
"Painting Systems",
"Chinese Painting",
"Calligraphy"
],
"authors": [
{
"affiliation": "National Taiwan University",
"fullName": "Jeng-sheng Yeh",
"givenName": "Jeng-sheng",
"surname": "Yeh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan University",
"fullName": "Ting-yu Lien",
"givenName": "Ting-yu",
"surname": "Lien",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan University",
"fullName": "Ming Ouhyoung",
"givenName": "Ming",
"surname": "Ouhyoung",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-10-01T00:00:00",
"pubType": "proceedings",
"pages": "439",
"year": "2002",
"issn": null,
"isbn": "0-7695-1784-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "17840435",
"articleId": "12OmNzVXNZU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "17840442",
"articleId": "12OmNz61dsk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccae/2009/3569/0/3569a160",
"title": "Haptic Device Application in Persian Calligraphy",
"doi": null,
"abstractUrl": "/proceedings-article/iccae/2009/3569a160/12OmNBkP3wM",
"parentPublication": {
"id": "proceedings/iccae/2009/3569/0",
"title": "2009 International Conference on Computer and Automation Engineering. ICCAE 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2002/1784/0/17840413",
"title": "An Efficient Brush Model for Physically-Based 3D Painting",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840413/12OmNBuL1iB",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539787",
"title": "A Method to Generate Writing-Brush-Style Japanese Hiragana Character Calligraphy",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539787/12OmNCfjeF3",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480776",
"title": "Cutting, Deforming and Painting of 3D meshes in a Two Handed Viso-haptic VR System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480776/12OmNCwlacX",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2008/3425/0/3425a269",
"title": "Error Control Based on Importance in a Remote Haptic Calligraphy System",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2008/3425a269/12OmNyYm2uH",
"parentPublication": {
"id": "proceedings/ds-rt/2008/3425/0",
"title": "Distributed Simulation and Real Time Applications, IEEE/ACM International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbie/2008/3561/0/3561a090",
"title": "Image Auto-generation of Brush Modeling and Ink Diffusion Chinese Ink Paintin",
"doi": null,
"abstractUrl": "/proceedings-article/fbie/2008/3561a090/12OmNyv7mea",
"parentPublication": {
"id": "proceedings/fbie/2008/3561/0",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2005/2432/0/24320989",
"title": "HUA: An Interactive Calligraphy and Ink-Wash Painting System",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2005/24320989/12OmNywfKzx",
"parentPublication": {
"id": "proceedings/cit/2005/2432/0",
"title": "The Fifth International Conference on Computer and Information Technology CIT 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920119",
"title": "ArtNova: Touch-Enabled 3D Model Design",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920119/12OmNzTH0Wu",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2009/3641/0/3641a683",
"title": "Animating the Brush-writing Process of Chinese Calligraphy Characters",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2009/3641a683/12OmNzhna80",
"parentPublication": {
"id": "proceedings/icis/2009/3641/0",
"title": "Computer and Information Science, ACIS International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2004/05/mcg2004050076",
"title": "Real-Time Painting with an Expressive Virtual Chinese Brush",
"doi": null,
"abstractUrl": "/magazine/cg/2004/05/mcg2004050076/13rRUwfqpG4",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzuZUzo",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"acronym": "mediacom",
"groupId": "1800264",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrJ11w2",
"doi": "10.1109/MEDIACOM.2010.71",
"title": "A Survey of Rendering of Chinese Painting",
"normalizedTitle": "A Survey of Rendering of Chinese Painting",
"abstract": "In this paper, through analysis the characteristic of Chinese traditional ink painting, we introduce the current main methods of computer simulation from different aspects, elaborate several classical brush, paper models, summarizes their respectively character, discuss the ink diffusion methods during the process of drawing, In addition, introduce other Chinese painting simulation methods and discuss the future research directions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, through analysis the characteristic of Chinese traditional ink painting, we introduce the current main methods of computer simulation from different aspects, elaborate several classical brush, paper models, summarizes their respectively character, discuss the ink diffusion methods during the process of drawing, In addition, introduce other Chinese painting simulation methods and discuss the future research directions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, through analysis the characteristic of Chinese traditional ink painting, we introduce the current main methods of computer simulation from different aspects, elaborate several classical brush, paper models, summarizes their respectively character, discuss the ink diffusion methods during the process of drawing, In addition, introduce other Chinese painting simulation methods and discuss the future research directions.",
"fno": "4136a153",
"keywords": [
"Chinese Ink Painting",
"Render",
"Model"
],
"authors": [
{
"affiliation": null,
"fullName": "Cao Yi",
"givenName": "Cao",
"surname": "Yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wang Zhengxuan",
"givenName": "Wang",
"surname": "Zhengxuan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mediacom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "153-156",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4136-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4136a149",
"articleId": "12OmNzh5z1t",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4136a157",
"articleId": "12OmNwO5LZy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-amh/2009/5508/0/05336722",
"title": "An intuitional interface for invocation of Chinese painting",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2009/05336722/12OmNAlvI7Q",
"parentPublication": {
"id": "proceedings/ismar-amh/2009/5508/0",
"title": "2009 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2002/1784/0/17840439",
"title": "On the Effects of Haptic Display in Brush and Ink Simulation for Chinese Painting and Calligraphy",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840439/12OmNApcutB",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1995/7062/0/70620098",
"title": "A diffusion model for computer animation of diffuse ink painting",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1995/70620098/12OmNCbU2S0",
"parentPublication": {
"id": "proceedings/ca/1995/7062/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2007/2928/0/29280121",
"title": "The Research About Xuan-Paper Model in the Simulation of Chinese Painting",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2007/29280121/12OmNrNh0Am",
"parentPublication": {
"id": "proceedings/cgiv/2007/2928/0",
"title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2012/4836/0/4836a013",
"title": "An Automatic Rendering Method of Line Strokes for Chinese Landscape Painting",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2012/4836a013/12OmNy3RRF5",
"parentPublication": {
"id": "proceedings/icvrv/2012/4836/0",
"title": "2012 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbie/2008/3561/0/3561a090",
"title": "Image Auto-generation of Brush Modeling and Ink Diffusion Chinese Ink Paintin",
"doi": null,
"abstractUrl": "/proceedings-article/fbie/2008/3561a090/12OmNyv7mea",
"parentPublication": {
"id": "proceedings/fbie/2008/3561/0",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/12/07726076",
"title": "Data-Driven NPR Illustrations of Natural Flows in Chinese Painting",
"doi": null,
"abstractUrl": "/journal/tg/2017/12/07726076/13rRUwkfAZk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a284",
"title": "Real-time Rendering of 3D Animal Models in Chinese Ink Painting Style",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a284/1p1grC3XnGw",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCvLY1Q",
"title": "Proceedings Sixth International Conference on Tools with Artificial Intelligence. TAI 94",
"acronym": "tai",
"groupId": "1000763",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwtn3wN",
"doi": "10.1109/TAI.1994.346474",
"title": "An attempt to apply the dividing and composing method to various frames of Chinese flower and bird paintings",
"normalizedTitle": "An attempt to apply the dividing and composing method to various frames of Chinese flower and bird paintings",
"abstract": "In our previous paper (1990), we discussed methods of expressing the composition knowledge of Chinese flower and bird paintings and proposed the dividing and composing method which could support computer painting based on the composition rules. In this paper, we attempt to apply this dividing and composing method to the various frames of paper (paintings using paper of different shapes) of Chinese flower and bird paintings. First, we analyzed the composition rules and described the expression method of composition knowledge in different frames of paper. Then, we defined the characteristic volume of painting elements, the frame centroid, the number of divisions and the division displacement to extract a dividing model. This procedure made possible the composition formation of various frames of paper.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "In our previous paper (1990), we discussed methods of expressing the composition knowledge of Chinese flower and bird paintings and proposed the dividing and composing method which could support computer painting based on the composition rules. In this paper, we attempt to apply this dividing and composing method to the various frames of paper (paintings using paper of different shapes) of Chinese flower and bird paintings. First, we analyzed the composition rules and described the expression method of composition knowledge in different frames of paper. Then, we defined the characteristic volume of painting elements, the frame centroid, the number of divisions and the division displacement to extract a dividing model. This procedure made possible the composition formation of various frames of paper.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In our previous paper (1990), we discussed methods of expressing the composition knowledge of Chinese flower and bird paintings and proposed the dividing and composing method which could support computer painting based on the composition rules. In this paper, we attempt to apply this dividing and composing method to the various frames of paper (paintings using paper of different shapes) of Chinese flower and bird paintings. First, we analyzed the composition rules and described the expression method of composition knowledge in different frames of paper. Then, we defined the characteristic volume of painting elements, the frame centroid, the number of divisions and the division displacement to extract a dividing model. This procedure made possible the composition formation of various frames of paper.",
"fno": "00346474",
"keywords": [
"Knowledge Based Systems",
"Computer Graphics",
"Art",
"Knowledge Representation",
"Dividing And Composing Method",
"Chinese Flower Painting",
"Bird Painting",
"Composition Knowledge",
"Computer Painting",
"Composition Rules",
"Expression Method",
"Characteristic Volume",
"Frame Centroid",
"Division Displacement",
"Birds",
"Painting",
"Knowledge Engineering",
"Shape",
"Data Mining",
"Cities And Towns",
"Information Processing",
"Application Software",
"Information Analysis",
"Ink"
],
"authors": [
{
"affiliation": "Fac. of Eng., Hokkaido Univ., Sapporo, Japan",
"fullName": "Da-Yu Liu",
"givenName": null,
"surname": "Da-Yu Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fac. of Eng., Hokkaido Univ., Sapporo, Japan",
"fullName": "Y. Aoki",
"givenName": "Y.",
"surname": "Aoki",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "tai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "317,318,319,320,321,322,323,324",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00346473",
"articleId": "12OmNx4yvFg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00346475",
"articleId": "12OmNAgoV7U",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dicta/2005/2467/0/24670073",
"title": "Image Indexing and Retrieval for a Vietnamese Folk Paintings Gallery",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2005/24670073/12OmNBqv29i",
"parentPublication": {
"id": "proceedings/dicta/2005/2467/0",
"title": "Digital Image Computing: Techniques and Applications (DICTA'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/12/07726076",
"title": "Data-Driven NPR Illustrations of Natural Flows in Chinese Painting",
"doi": null,
"abstractUrl": "/journal/tg/2017/12/07726076/13rRUwkfAZk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a521",
"title": "Ancient Painting to Natural Image: A New Solution for Painting Processing",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a521/18j8QuxyyWI",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icid/2021/2065/0/206500a297",
"title": "Feasibility analysis of digital creation of Chinese traditional freehand flower-and-bird painting",
"doi": null,
"abstractUrl": "/proceedings-article/icid/2021/206500a297/1AjTAuZJbzO",
"parentPublication": {
"id": "proceedings/icid/2021/2065/0",
"title": "2021 2nd International Conference on Intelligent Design (ICID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412005",
"title": "One-shot learning for acoustic identification of bird species in non-stationary environments",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412005/1tmj1xvDB0k",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1471",
"title": "Your “Flamingo” is My “Bird”: Fine-Grained, or Not",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1471/1yeIdxkKPSM",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzuIjee",
"title": "Digital Media and Digital Content Management, Workshop on",
"acronym": "dmdcm",
"groupId": "1800440",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQGSov",
"doi": "10.1109/DMDCM.2011.13",
"title": "Creating for 3D Digital Chinese Ink-Wash Landscape Paintings Based on Maya",
"normalizedTitle": "Creating for 3D Digital Chinese Ink-Wash Landscape Paintings Based on Maya",
"abstract": "Putting forward the idea of simulating the \"three-perspective method\" and \"cavalier perspective\" layout in the Chinese landscape paintings theory based on \"Oblique Projection\", with a successful realization. A process of brush rendering and multi-layer rendering with characteristics of Chinese ink-wash landscape paintings has been brought forward, thus explores the ideas of creating 3D digital Chinese ink-wash landscape paintings. Dynamic and highly-realistic 3D ink-wash landscapes are brought out in the visual digital 3D space, which the traditional 2D ink-wash landscapes could not express.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Putting forward the idea of simulating the \"three-perspective method\" and \"cavalier perspective\" layout in the Chinese landscape paintings theory based on \"Oblique Projection\", with a successful realization. A process of brush rendering and multi-layer rendering with characteristics of Chinese ink-wash landscape paintings has been brought forward, thus explores the ideas of creating 3D digital Chinese ink-wash landscape paintings. Dynamic and highly-realistic 3D ink-wash landscapes are brought out in the visual digital 3D space, which the traditional 2D ink-wash landscapes could not express.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Putting forward the idea of simulating the \"three-perspective method\" and \"cavalier perspective\" layout in the Chinese landscape paintings theory based on \"Oblique Projection\", with a successful realization. A process of brush rendering and multi-layer rendering with characteristics of Chinese ink-wash landscape paintings has been brought forward, thus explores the ideas of creating 3D digital Chinese ink-wash landscape paintings. Dynamic and highly-realistic 3D ink-wash landscapes are brought out in the visual digital 3D space, which the traditional 2D ink-wash landscapes could not express.",
"fno": "4413a013",
"keywords": [
"Maya",
"3 D",
"Digital",
"Chinese Ink Wash Landscape Paintings"
],
"authors": [
{
"affiliation": null,
"fullName": "Xunxiang Li",
"givenName": "Xunxiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dmdcm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-05-01T00:00:00",
"pubType": "proceedings",
"pages": "13-17",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4413-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4413a009",
"articleId": "12OmNrAv3Rw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4413a018",
"articleId": "12OmNBaBuPy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pma/2006/2851/0/2851a328",
"title": "Simulation of Chinese Ink-Wash Painting Based on Landscapes and Trees",
"doi": null,
"abstractUrl": "/proceedings-article/pma/2006/2851a328/12OmNAZfxHd",
"parentPublication": {
"id": "proceedings/pma/2006/2851/0",
"title": "2006 Second International Symposium on Plant Growth Modeling, Simulation, Visualization and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2003/1946/0/19460152",
"title": "\"Nijimi\" Rendering Algorithm for Creating Quality Black Ink Paintings",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2003/19460152/12OmNqG0SMZ",
"parentPublication": {
"id": "proceedings/cgi/2003/1946/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270478",
"title": "A Real-Time ProCam System for Interaction with Chinese Ink-and-Wash Cartoons",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270478/12OmNxEjY8Z",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2005/2296/0/22960598",
"title": "Oriental Color Ink Rendering for Landscape",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2005/22960598/12OmNxisR2n",
"parentPublication": {
"id": "proceedings/icis/2005/2296/0",
"title": "Proceedings. Fourth Annual ACIS International Conference on Computer and Information Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2012/4836/0/4836a013",
"title": "An Automatic Rendering Method of Line Strokes for Chinese Landscape Painting",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2012/4836a013/12OmNy3RRF5",
"parentPublication": {
"id": "proceedings/icvrv/2012/4836/0",
"title": "2012 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2005/2432/0/24320989",
"title": "HUA: An Interactive Calligraphy and Ink-Wash Painting System",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2005/24320989/12OmNywfKzx",
"parentPublication": {
"id": "proceedings/cit/2005/2432/0",
"title": "The Fifth International Conference on Computer and Information Technology CIT 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2005/2392/0/23920317",
"title": "Physical Modeling of \"Xuan\" Paper in the Simulation of Chinese Ink-Wash Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2005/23920317/12OmNzTppDM",
"parentPublication": {
"id": "proceedings/cgiv/2005/2392/0",
"title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2016/4155/0/4155a477",
"title": "Using Blur and Edge Detection for Conversing Image to the Ink-Wash Effect",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2016/4155a477/12OmNzvQHVm",
"parentPublication": {
"id": "proceedings/icris/2016/4155/0",
"title": "2016 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a205",
"title": "A Method for Ink-Wash Painting Rendering for 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a205/17D45WGGoLy",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a296",
"title": "Design and implementation of immersive ink art",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a296/1vg7DnnY38k",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzvQHK2",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"acronym": "fbie",
"groupId": "1002779",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyv7mea",
"doi": "10.1109/FBIE.2008.112",
"title": "Image Auto-generation of Brush Modeling and Ink Diffusion Chinese Ink Paintin",
"normalizedTitle": "Image Auto-generation of Brush Modeling and Ink Diffusion Chinese Ink Paintin",
"abstract": "Traditional Chinese ink painting has developed continuously over a period of more than three thousand years. Simulating the traditional painting art by computer graphics is a challenging and attractive subject. In this paper we describe how to synthesize a complete Chinese ink painting. First of all, we propose a brush model. We initialize a data structure of brush to simulate different brushworks and use this data structures to parameterize the brush model and its behavior. Based on this model, we can implement a particular artistic effect \"Half-Dry\". In order to simulate the Chinese ink painting more natural and realistic, we propose a new diffusion algorithm based on fractal, which is based on the method of Random Midpoint Displacement. By all the methods above we can simulate Chinese ink painting realistically and naturally.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Traditional Chinese ink painting has developed continuously over a period of more than three thousand years. Simulating the traditional painting art by computer graphics is a challenging and attractive subject. In this paper we describe how to synthesize a complete Chinese ink painting. First of all, we propose a brush model. We initialize a data structure of brush to simulate different brushworks and use this data structures to parameterize the brush model and its behavior. Based on this model, we can implement a particular artistic effect \"Half-Dry\". In order to simulate the Chinese ink painting more natural and realistic, we propose a new diffusion algorithm based on fractal, which is based on the method of Random Midpoint Displacement. By all the methods above we can simulate Chinese ink painting realistically and naturally.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Traditional Chinese ink painting has developed continuously over a period of more than three thousand years. Simulating the traditional painting art by computer graphics is a challenging and attractive subject. In this paper we describe how to synthesize a complete Chinese ink painting. First of all, we propose a brush model. We initialize a data structure of brush to simulate different brushworks and use this data structures to parameterize the brush model and its behavior. Based on this model, we can implement a particular artistic effect \"Half-Dry\". In order to simulate the Chinese ink painting more natural and realistic, we propose a new diffusion algorithm based on fractal, which is based on the method of Random Midpoint Displacement. By all the methods above we can simulate Chinese ink painting realistically and naturally.",
"fno": "3561a090",
"keywords": [
"Chinese Ink Painting",
"Brush Model",
"Random Midpoint Displacement",
"Fractal Diffusion"
],
"authors": [
{
"affiliation": null,
"fullName": "Shao-dan Jin",
"givenName": "Shao-dan",
"surname": "Jin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tian-ding Chen",
"givenName": "Tian-ding",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fbie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "90-94",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3561-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3561a086",
"articleId": "12OmNzhnadK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3561a095",
"articleId": "12OmNBc1uwo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2002/1784/0/17840439",
"title": "On the Effects of Haptic Display in Brush and Ink Simulation for Chinese Painting and Calligraphy",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840439/12OmNApcutB",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1995/7062/0/70620098",
"title": "A diffusion model for computer animation of diffuse ink painting",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1995/70620098/12OmNCbU2S0",
"parentPublication": {
"id": "proceedings/ca/1995/7062/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mediacom/2010/4136/0/4136a153",
"title": "A Survey of Rendering of Chinese Painting",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a153/12OmNrJ11w2",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2005/2432/0/24320989",
"title": "HUA: An Interactive Calligraphy and Ink-Wash Painting System",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2005/24320989/12OmNywfKzx",
"parentPublication": {
"id": "proceedings/cit/2005/2432/0",
"title": "The Fifth International Conference on Computer and Information Technology CIT 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a158",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a158/12OmNyxXlwN",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2006/2687/0/04019925",
"title": "Modeling Scratchiness Effect of Oriental Writing Brush",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2006/04019925/12OmNzV70Gm",
"parentPublication": {
"id": "proceedings/cit/2006/2687/0",
"title": "The Sixth IEEE International Conference on Computer and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2004/05/mcg2004050076",
"title": "Real-Time Painting with an Expressive Virtual Chinese Brush",
"doi": null,
"abstractUrl": "/magazine/cg/2004/05/mcg2004050076/13rRUwfqpG4",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a284",
"title": "Real-time Rendering of 3D Animal Models in Chinese Ink Painting Style",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a284/1p1grC3XnGw",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBRbknI",
"title": "The Sixth IEEE International Conference on Computer and Information Technology",
"acronym": "cit",
"groupId": "1001306",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzV70Gm",
"doi": "10.1109/CIT.2006.124",
"title": "Modeling Scratchiness Effect of Oriental Writing Brush",
"normalizedTitle": "Modeling Scratchiness Effect of Oriental Writing Brush",
"abstract": "Oriental ink writing consists of simple strokes intended to convey an artist's deep feelings that makes it to be regarded as a painting. Scratchiness in Japanese Kasure is one of the techniques which can express writer's emotion remarkably in calligraphy. A novel method to express Kasure focused on the change of the energy is proposed in this paper. An equation is designed to simulate oriental drawing. The ink in the brush permeates into a paper according to the equation which concerns the brush speed and volume of paper where the brush touches. Besides energy transmission equation, the droplets model is described in this paper to implement Kasure. The entire stroke area is assumed as the trail the droplets have swept. The hull of separated droplets elegantly expresses the scratchiness. We introduce the line form scratchiness segment as a basic unit for filling the between the droplets. The line shape components we propose is suited for expressing scratchiness. We present some examples of scratchiness as the results, and the emotion such as power and passion of the writer is expressed by proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Oriental ink writing consists of simple strokes intended to convey an artist's deep feelings that makes it to be regarded as a painting. Scratchiness in Japanese Kasure is one of the techniques which can express writer's emotion remarkably in calligraphy. A novel method to express Kasure focused on the change of the energy is proposed in this paper. An equation is designed to simulate oriental drawing. The ink in the brush permeates into a paper according to the equation which concerns the brush speed and volume of paper where the brush touches. Besides energy transmission equation, the droplets model is described in this paper to implement Kasure. The entire stroke area is assumed as the trail the droplets have swept. The hull of separated droplets elegantly expresses the scratchiness. We introduce the line form scratchiness segment as a basic unit for filling the between the droplets. The line shape components we propose is suited for expressing scratchiness. We present some examples of scratchiness as the results, and the emotion such as power and passion of the writer is expressed by proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Oriental ink writing consists of simple strokes intended to convey an artist's deep feelings that makes it to be regarded as a painting. Scratchiness in Japanese Kasure is one of the techniques which can express writer's emotion remarkably in calligraphy. A novel method to express Kasure focused on the change of the energy is proposed in this paper. An equation is designed to simulate oriental drawing. The ink in the brush permeates into a paper according to the equation which concerns the brush speed and volume of paper where the brush touches. Besides energy transmission equation, the droplets model is described in this paper to implement Kasure. The entire stroke area is assumed as the trail the droplets have swept. The hull of separated droplets elegantly expresses the scratchiness. We introduce the line form scratchiness segment as a basic unit for filling the between the droplets. The line shape components we propose is suited for expressing scratchiness. We present some examples of scratchiness as the results, and the emotion such as power and passion of the writer is expressed by proposed method.",
"fno": "04019925",
"keywords": [
"Writing",
"Brushes",
"Ink",
"Painting",
"Equations",
"Art",
"Filling",
"Spline",
"Computer Science",
"Cities And Towns"
],
"authors": [
{
"affiliation": "The University of Aizu, Japan",
"fullName": "Atsushi Takeda",
"givenName": "Atsushi",
"surname": "Takeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Aizu, Japan",
"fullName": "Jungpil Shin",
"givenName": "Jungpil",
"surname": "Shin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Aizu, Japan",
"fullName": "Won-du Chang",
"givenName": "Won-du",
"surname": "Chang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-09-01T00:00:00",
"pubType": "proceedings",
"pages": "121",
"year": "2006",
"issn": null,
"isbn": "0-7695-2687-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "26870120",
"articleId": "12OmNx3q6Yu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "26870123",
"articleId": "12OmNAolGUJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-graphics/2015/8020/0/07450430",
"title": "Modeling Virtual Chinese Brush Using Camshaft Curve and Stochastic Process",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450430/12OmNBqv2a0",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2006/0310/0/04120392",
"title": "Evolutionary Replication of Calligraphic Characters By A Robot Drawing Platform Using Experimentally Acquired Brush Footprint",
"doi": null,
"abstractUrl": "/proceedings-article/case/2006/04120392/12OmNC2OSLi",
"parentPublication": {
"id": "proceedings/case/2006/0310/0",
"title": "2006 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mediacom/2010/4136/0/4136a153",
"title": "A Survey of Rendering of Chinese Painting",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a153/12OmNrJ11w2",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2002/1784/0/17840452",
"title": "A Model Based Technique for Realistic Oriental Painting",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840452/12OmNyqiaUi",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbie/2008/3561/0/3561a090",
"title": "Image Auto-generation of Brush Modeling and Ink Diffusion Chinese Ink Paintin",
"doi": null,
"abstractUrl": "/proceedings-article/fbie/2008/3561a090/12OmNyv7mea",
"parentPublication": {
"id": "proceedings/fbie/2008/3561/0",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a158",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a158/12OmNyxXlwN",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1999/03/mcg1999030074",
"title": "Simulating Oriental Black-Ink Painting",
"doi": null,
"abstractUrl": "/magazine/cg/1999/03/mcg1999030074/13rRUx0gecb",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798200",
"title": "Panoramic Fluid Painting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798200/1cJ0VsoPxfO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1mA9XpQUfWo",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"acronym": "mipr",
"groupId": "1825825",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1mAa1rF37eE",
"doi": "10.1109/MIPR49039.2020.00089",
"title": "Rule-Based Composition Grammar Analysis and Applications",
"normalizedTitle": "Rule-Based Composition Grammar Analysis and Applications",
"abstract": "This paper proposes a visual method of melody writing based on music rules and the digital characteristics of Equal-temperament. Specifically, clock diagrams are used to visualize the equal ratio and cyclic relationship in Equal-temperament. After the visualization of all the keys used in the composition, this graphic method can be used to implement compose on any key. With the rule-based composition grammar, new melody can be written with chords as the basic unit through a visual \"jump point\" link. This grammar is only used for pitches in melody composition, and note duration will be studied in our future research. For those non-musicians, the rule-based composition grammar can be used to compose from simple to complex. The grammar analysis can be applied not only to melody, but also to chords, harmony, orchestration and so on.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a visual method of melody writing based on music rules and the digital characteristics of Equal-temperament. Specifically, clock diagrams are used to visualize the equal ratio and cyclic relationship in Equal-temperament. After the visualization of all the keys used in the composition, this graphic method can be used to implement compose on any key. With the rule-based composition grammar, new melody can be written with chords as the basic unit through a visual \"jump point\" link. This grammar is only used for pitches in melody composition, and note duration will be studied in our future research. For those non-musicians, the rule-based composition grammar can be used to compose from simple to complex. The grammar analysis can be applied not only to melody, but also to chords, harmony, orchestration and so on.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a visual method of melody writing based on music rules and the digital characteristics of Equal-temperament. Specifically, clock diagrams are used to visualize the equal ratio and cyclic relationship in Equal-temperament. After the visualization of all the keys used in the composition, this graphic method can be used to implement compose on any key. With the rule-based composition grammar, new melody can be written with chords as the basic unit through a visual \"jump point\" link. This grammar is only used for pitches in melody composition, and note duration will be studied in our future research. For those non-musicians, the rule-based composition grammar can be used to compose from simple to complex. The grammar analysis can be applied not only to melody, but also to chords, harmony, orchestration and so on.",
"fno": "427200a404",
"keywords": [
"Data Visualisation",
"Grammars",
"Knowledge Based Systems",
"Music",
"Music Rules",
"Graphic Method",
"Visual Jump Point Link",
"Melody Composition",
"Rule Based Composition Grammar Analysis",
"Visual Method",
"Melody Writing",
"Digital Characteristics",
"Equal Temperament",
"Clock Diagrams",
"Visualization",
"Note Duration",
"Grammar Analysis",
"Grammar",
"Visualization",
"Clocks",
"Artificial Intelligence",
"Music",
"Frequency Modulation",
"Silicon Carbide",
"Equal Temperament",
"Composition Grammar",
"Chords Visualization"
],
"authors": [
{
"affiliation": "Tianjin University, China",
"fullName": "Lin Gan",
"givenName": "Lin",
"surname": "Gan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin University, China",
"fullName": "Likai Wei",
"givenName": "Likai",
"surname": "Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin University, China",
"fullName": "Yingjun Deng",
"givenName": "Yingjun",
"surname": "Deng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-08-01T00:00:00",
"pubType": "proceedings",
"pages": "404-407",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-4272-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "427200a400",
"articleId": "1mA9Z4FFJ7i",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "427200a408",
"articleId": "1mA9Xz1KoBG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118c641",
"title": "From Stochastic Grammar to Bayes Network: Probabilistic Parsing of Complex Activity",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c641/12OmNAKuoRF",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1999/0216/0/02160228",
"title": "An NCE Context-Sensitive Graph Grammar for Visual Design Languages",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1999/02160228/12OmNwE9Oth",
"parentPublication": {
"id": "proceedings/vl/1999/0216/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a254",
"title": "Narrative Grammar in 360",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a254/12OmNxzuMIV",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1995/7128/2/71281080",
"title": "A simplified attributed graph grammar for high-level music recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1995/71281080/12OmNz61dAt",
"parentPublication": {
"id": "proceedings/icdar/1995/7128/2",
"title": "Proceedings of 3rd International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icebe/2014/6563/0/6563a139",
"title": "Self-Adaptive Web Service Composition Based on Stochastic Context-Free Grammar",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2014/6563a139/12OmNznkK5U",
"parentPublication": {
"id": "proceedings/icebe/2014/6563/0",
"title": "2014 IEEE 11th International Conference on e-Business Engineering (ICEBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539624",
"title": "Vega-Lite: A Grammar of Interactive Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539624/13rRUIJuxvn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2010/03/tts2010030431",
"title": "Program Behavior Discovery and Verification: A Graph Grammar Approach",
"doi": null,
"abstractUrl": "/journal/ts/2010/03/tts2010030431/13rRUy3xY4l",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csii/2018/7875/0/787501a096",
"title": "Improvement of Automatic Composition System Based on Melodic Outlines and Impression Words",
"doi": null,
"abstractUrl": "/proceedings-article/csii/2018/787501a096/13xI8AsfhNr",
"parentPublication": {
"id": "proceedings/csii/2018/7875/0",
"title": "2018 5th International Conference on Computational Science/Intelligence and Applied Informatics (CSII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09234027",
"title": "Gemini: A Grammar and Recommender System for Animated Transitions in Statistical Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09234027/1o531wbxsSk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09417674",
"title": "Nebula: A Coordinating Grammar of Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09417674/1taANyFFcmQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNviZlGb",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"acronym": "3dpvt",
"groupId": "1000000",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxYtu2L",
"doi": "10.1109/TDPVT.2004.1335304",
"title": "Progressive Compression of Volumetric Subdivision Meshes",
"normalizedTitle": "Progressive Compression of Volumetric Subdivision Meshes",
"abstract": "We present a progressive compression technique for volumetric subdivision meshes based on the slow growing refinement algorithm. The system is comprised of a wavelet transform followed by a progressive encoding of the resulting wavelet coefficients. We compare the efficiency of two wavelet transforms. The first transform is based on the smoothing rules used in the slow growing subdivision technique. The second transform is a generalization of lifted linear B-spline wavelets to the same multi-tier refinement structure. Direct coupling with a hierarchical coder produces progressive bit streams. Rate distortion metrics are evaluated for both wavelet transforms. We tested the practical performance of the scheme on synthetic data as well as data from laser indirect-drive fusion simulations with multiple fields per vertex. Both wavelet transforms result in high quality trade off curves and produce qualitatively good coarse representations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a progressive compression technique for volumetric subdivision meshes based on the slow growing refinement algorithm. The system is comprised of a wavelet transform followed by a progressive encoding of the resulting wavelet coefficients. We compare the efficiency of two wavelet transforms. The first transform is based on the smoothing rules used in the slow growing subdivision technique. The second transform is a generalization of lifted linear B-spline wavelets to the same multi-tier refinement structure. Direct coupling with a hierarchical coder produces progressive bit streams. Rate distortion metrics are evaluated for both wavelet transforms. We tested the practical performance of the scheme on synthetic data as well as data from laser indirect-drive fusion simulations with multiple fields per vertex. Both wavelet transforms result in high quality trade off curves and produce qualitatively good coarse representations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a progressive compression technique for volumetric subdivision meshes based on the slow growing refinement algorithm. The system is comprised of a wavelet transform followed by a progressive encoding of the resulting wavelet coefficients. We compare the efficiency of two wavelet transforms. The first transform is based on the smoothing rules used in the slow growing subdivision technique. The second transform is a generalization of lifted linear B-spline wavelets to the same multi-tier refinement structure. Direct coupling with a hierarchical coder produces progressive bit streams. Rate distortion metrics are evaluated for both wavelet transforms. We tested the practical performance of the scheme on synthetic data as well as data from laser indirect-drive fusion simulations with multiple fields per vertex. Both wavelet transforms result in high quality trade off curves and produce qualitatively good coarse representations.",
"fno": "22230680",
"keywords": [],
"authors": [
{
"affiliation": "Lawrence Livermore National Laboratory",
"fullName": "Daniel Laney",
"givenName": "Daniel",
"surname": "Laney",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lawrence Livermore National Laboratory",
"fullName": "Valerio Pascucci",
"givenName": "Valerio",
"surname": "Pascucci",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dpvt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-09-01T00:00:00",
"pubType": "proceedings",
"pages": "680-687",
"year": "2004",
"issn": null,
"isbn": "0-7695-2223-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "22230673",
"articleId": "12OmNzuIjub",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "22230688",
"articleId": "12OmNASraQT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2000/0868/0/08680408",
"title": "Progressive Geometry Compression for Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2000/08680408/12OmNAWYKFX",
"parentPublication": {
"id": "proceedings/pg/2000/0868/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacmedtek/1998/8667/0/86670384",
"title": "Progressive Coding of Medical Volumetric Data Using Three-Dimensional Integer Wavelet Packet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/pacmedtek/1998/86670384/12OmNButq89",
"parentPublication": {
"id": "proceedings/pacmedtek/1998/8667/0",
"title": "Pacific Medical Technology Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2004/2234/0/22340025",
"title": "Unlifted Loop Subdivision Wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2004/22340025/12OmNrFTrab",
"parentPublication": {
"id": "proceedings/pg/2004/2234/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2002/1521/0/15210470",
"title": "Multiresolution Distance Volumes for Progressive Surface Compression",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2002/15210470/12OmNwdtw8o",
"parentPublication": {
"id": "proceedings/3dpvt/2002/1521/0",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacific-graphics/2010/4205/0/4205a032",
"title": "Computing Efficient Matrix-valued Wavelets for Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a032/12OmNwekjFg",
"parentPublication": {
"id": "proceedings/pacific-graphics/2010/4205/0",
"title": "Pacific Conference on Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/04/v0365",
"title": "Multiresolution Analysis on Irregular Surface Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1998/04/v0365/13rRUIM2VBu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/03/v0326",
"title": "Generalized B-Spline Subdivision-Surface Wavelets for Geometry Compression",
"doi": null,
"abstractUrl": "/journal/tg/2004/03/v0326/13rRUwjGoLw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/02/v0123",
"title": "Wavelet-Based Progressive Compression Scheme for Triangle Meshes: Wavemesh",
"doi": null,
"abstractUrl": "/journal/tg/2004/02/v0123/13rRUwwJWFG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/05/v0914",
"title": "√3-Subdivision-Based Biorthogonal Wavelets",
"doi": null,
"abstractUrl": "/journal/tg/2007/05/v0914/13rRUxZ0o1n",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08378005",
"title": "Biorthogonal Wavelet Transforms and Applications Based on Generalized Progressive Catmull-Clark Subdivision with Shape Control",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08378005/13rRUxly9e4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxA3Z4H",
"title": "IEEE International Conference on Shape Modeling and Applications",
"acronym": "smi",
"groupId": "1000664",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyRg4DU",
"doi": "10.1109/SMI.2008.4547938",
"title": "GPU smoothing of quad meshes",
"normalizedTitle": "GPU smoothing of quad meshes",
"abstract": "We present a fast algorithm for converting quad meshes on the GPU to smooth surfaces. Meshes with 12,000 input quads, of which 60% have one or more non-4-valent vertices, are converted, evaluated and rendered with 9 × 9 resolution per quad at 50 frames per second. The conversion reproduces bi-cubic splines wherever possible and closely mimics the shape of the Catmull-Clark subdivision surface by c-patches where a vertex has a valence different from 4. The smooth surface is piecewise polynomial and has well-defined normals everywhere. The evaluation avoids pixel dropout.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a fast algorithm for converting quad meshes on the GPU to smooth surfaces. Meshes with 12,000 input quads, of which 60% have one or more non-4-valent vertices, are converted, evaluated and rendered with 9 × 9 resolution per quad at 50 frames per second. The conversion reproduces bi-cubic splines wherever possible and closely mimics the shape of the Catmull-Clark subdivision surface by c-patches where a vertex has a valence different from 4. The smooth surface is piecewise polynomial and has well-defined normals everywhere. The evaluation avoids pixel dropout.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a fast algorithm for converting quad meshes on the GPU to smooth surfaces. Meshes with 12,000 input quads, of which 60% have one or more non-4-valent vertices, are converted, evaluated and rendered with 9 × 9 resolution per quad at 50 frames per second. The conversion reproduces bi-cubic splines wherever possible and closely mimics the shape of the Catmull-Clark subdivision surface by c-patches where a vertex has a valence different from 4. The smooth surface is piecewise polynomial and has well-defined normals everywhere. The evaluation avoids pixel dropout.",
"fno": "04547938",
"keywords": [],
"authors": [
{
"affiliation": "University of Florida, USA",
"fullName": "T. Ni",
"givenName": "T.",
"surname": "Ni",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida, USA",
"fullName": "Y. Yeo",
"givenName": "Y.",
"surname": "Yeo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida, USA",
"fullName": "A. Myles",
"givenName": "A.",
"surname": "Myles",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Advanced Micro Devices, USA",
"fullName": "V. Goel",
"givenName": "V.",
"surname": "Goel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida, USA",
"fullName": "J. Peters",
"givenName": "J.",
"surname": "Peters",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "smi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3-9",
"year": "2008",
"issn": null,
"isbn": "978-1-4244-2260-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04547934",
"articleId": "12OmNAKuoTH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04547942",
"articleId": "12OmNzQzqeS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sc/1994/6605/0/00344287",
"title": "Meshes: the next generation",
"doi": null,
"abstractUrl": "/proceedings-article/sc/1994/00344287/12OmNAnuTFP",
"parentPublication": {
"id": "proceedings/sc/1994/6605/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/motion/2002/1860/0/18600253",
"title": "Real-time Tracking of Quad-Marked Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/motion/2002/18600253/12OmNBW0vAG",
"parentPublication": {
"id": "proceedings/motion/2002/1860/0",
"title": "Proceedings Workshop on Motion and Video Computing (MOTION 2002)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2004/2234/0/22340207",
"title": "Direct Anisotropic Quad-Dominant Remeshing",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2004/22340207/12OmNCfjerO",
"parentPublication": {
"id": "proceedings/pg/2004/2234/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2010/4286/2/4286b090",
"title": "Piecewise Smooth Surfaces Reconstruction for Finite Element Mixed Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2010/4286b090/12OmNrH1PBY",
"parentPublication": {
"id": "proceedings/icdma/2010/4286/2",
"title": "2010 International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2003/1988/0/19880065",
"title": "Efficient Handling of Shading Discontinuities for Progressive Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2003/19880065/12OmNvSKNQ2",
"parentPublication": {
"id": "proceedings/iv/2003/1988/0",
"title": "Proceedings on Seventh International Conference on Information Visualization, 2003. IV 2003.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2004/2075/0/20750191",
"title": "Least-Squares Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2004/20750191/12OmNyoiZ9E",
"parentPublication": {
"id": "proceedings/smi/2004/2075/0",
"title": "Proceedings. Shape Modeling International 2004",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2010/7259/0/05521462",
"title": "Reversely Anisotropic Quad-dominant Remeshing",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2010/05521462/12OmNywxlVm",
"parentPublication": {
"id": "proceedings/smi/2010/7259/0",
"title": "Shape Modeling International (SMI 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2008/2260/0/04547954",
"title": "Approximate topological matching of quadrilateral meshes",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2008/04547954/12OmNz5JBQB",
"parentPublication": {
"id": "proceedings/smi/2008/2260/0",
"title": "IEEE International Conference on Shape Modeling and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacific-graphics/2010/4205/0/4205a046",
"title": "Synthesizing Subdivision Meshes Using Real Time Tessellation",
"doi": null,
"abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a046/12OmNzahbXx",
"parentPublication": {
"id": "proceedings/pacific-graphics/2010/4205/0",
"title": "Pacific Conference on Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/05/07006805",
"title": "On Linear Spaces of Polyhedral Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/07006805/13rRUxYIMV0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvoWV1b",
"title": "Proceedings of IEEE Data Compression Conference (DCC'94)",
"acronym": "dcc",
"groupId": "1000177",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAhOUJ6",
"doi": "10.1109/DCC.1994.305941",
"title": "Self-similarity of the multiresolutional image/video decomposition: smart expansion as compression of still and moving pictures",
"normalizedTitle": "Self-similarity of the multiresolutional image/video decomposition: smart expansion as compression of still and moving pictures",
"abstract": "The paper introduces a new combined fractal/multiresolutional image compression based on the observed property of self-similarity of the pyramidal image transform. The gist of the method is zooming out from a (possibly shrunken) low-resolution image producing a sharp and crisp \"natural looking\" high-resolution view, without blockiness and jaggedness. It is demonstrated that the technique possesses features of preserving thinness of lines on expansion, translational invariance and providing a perfect high-resolution representation of the gradient fill. The multiresolutional transform algorithms and 'smart' image magnification developed for still images have been generalized to deal with moving pictures as a three-dimensional, spatio-temporal frame sequence, which permits rapid compression, smooth motion interpolation, and has potential for use in video transmission in real time.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The paper introduces a new combined fractal/multiresolutional image compression based on the observed property of self-similarity of the pyramidal image transform. The gist of the method is zooming out from a (possibly shrunken) low-resolution image producing a sharp and crisp \"natural looking\" high-resolution view, without blockiness and jaggedness. It is demonstrated that the technique possesses features of preserving thinness of lines on expansion, translational invariance and providing a perfect high-resolution representation of the gradient fill. The multiresolutional transform algorithms and 'smart' image magnification developed for still images have been generalized to deal with moving pictures as a three-dimensional, spatio-temporal frame sequence, which permits rapid compression, smooth motion interpolation, and has potential for use in video transmission in real time.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The paper introduces a new combined fractal/multiresolutional image compression based on the observed property of self-similarity of the pyramidal image transform. The gist of the method is zooming out from a (possibly shrunken) low-resolution image producing a sharp and crisp \"natural looking\" high-resolution view, without blockiness and jaggedness. It is demonstrated that the technique possesses features of preserving thinness of lines on expansion, translational invariance and providing a perfect high-resolution representation of the gradient fill. The multiresolutional transform algorithms and 'smart' image magnification developed for still images have been generalized to deal with moving pictures as a three-dimensional, spatio-temporal frame sequence, which permits rapid compression, smooth motion interpolation, and has potential for use in video transmission in real time.",
"fno": "00305941",
"keywords": [
"Data Compression",
"Image Coding",
"Video Signals",
"Image Sequences",
"Image Segmentation",
"Fractals",
"Self Similarity",
"Multiresolutional Image Decomposition",
"Moving Picture Compression",
"Still Picture Compression",
"Video Decomposition",
"Fractal Image Compression",
"Multiresolutional Image Compression",
"Pyramidal Image Transform",
"Low Resolution Image",
"Translational Invariance",
"High Resolution Representation",
"Gradient Fill",
"Multiresolutional Transform Algorithms",
"Smart Image Magnification",
"3 D Spatio Temporal Frame Sequence",
"Motion Interpolation",
"Real Time Video Transmission",
"Image Resolution",
"Video Compression",
"Image Coding",
"Fractals",
"Wavelet Analysis",
"Interpolation",
"Pixel",
"Image Storage",
"Image Decomposition",
"Computer Science"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Univ. of North Texas, Denton, TX, USA",
"fullName": "O. Kiselyov",
"givenName": "O.",
"surname": "Kiselyov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Univ. of North Texas, Denton, TX, USA",
"fullName": "P. Fisher",
"givenName": "P.",
"surname": "Fisher",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "331,332,333,334,335,336,337,338,339,340",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00305940",
"articleId": "12OmNvAAtlC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00305942",
"articleId": "12OmNy4IEXF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1995/7310/1/73100045",
"title": "Multiresolution model development for overlapping trees via canonical correlation analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100045/12OmNBUS73w",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2016/4400/0/4400a048",
"title": "Moving Obstacle Removal via Low Rank Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2016/4400a048/12OmNBaT5XO",
"parentPublication": {
"id": "proceedings/icdh/2016/4400/0",
"title": "2016 6th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576216",
"title": "Super-resolution estimation of edge images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576216/12OmNCxL9Si",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifita/2010/7621/2/05634908",
"title": "Research on Super Resolution Reconstruction Based on Scale Space",
"doi": null,
"abstractUrl": "/proceedings-article/ifita/2010/05634908/12OmNqJ8tvY",
"parentPublication": {
"id": "proceedings/ifita/2010/7621/2",
"title": "2010 International Forum on Information Technology and Applications (IFITA 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1995/7012/0/70120470",
"title": "Multiresolutional piecewise-linear image decompositions: quantization error propagation and design of \"stable\" compression schemes",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1995/70120470/12OmNvAiSsA",
"parentPublication": {
"id": "proceedings/dcc/1995/7012/0",
"title": "Proceedings DCC '95 Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2000/0878/0/08780339",
"title": "Image Compression via TRITREE Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2000/08780339/12OmNvlxJo3",
"parentPublication": {
"id": "proceedings/sibgrapi/2000/0878/0",
"title": "Proceedings 13th Brazilian Symposium on Computer Graphics and Image Processing (Cat. No.PR00878)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206706",
"title": "Contextual decomposition of multi-label images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206706/12OmNx0A7Oj",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587659",
"title": "Image super-resolution using gradient profile prior",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587659/12OmNx7G5SP",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711b045",
"title": "Image Super-Resolution via Low-Pass Filter Based Multi-scale Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711b045/12OmNz2C1wl",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2011/707/0/05753125",
"title": "Display-aware image editing",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2011/05753125/12OmNzcPAil",
"parentPublication": {
"id": "proceedings/iccp/2011/707/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyNQSG5",
"title": "EUROMICRO Conference",
"acronym": "euromicro",
"groupId": "1000279",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAqCtNn",
"doi": "10.1109/EURMIC.2001.952481",
"title": "VLSI Implementation of Fractal Image Compression Processor for Moving Pictures",
"normalizedTitle": "VLSI Implementation of Fractal Image Compression Processor for Moving Pictures",
"abstract": "Abstract: This paper proposes an efficient VLSI architecture of fractal image coding for moving pictures. The proposed processor makes use of parallel searching for similar domain blocks by grouping range blocks by identical classes. Furthermore, to encode a moving picture at high-speed, utilizing the domain block information obtained in the coding of a previous frame to code the following frame is employed. According to this architecture, a smaller fractal image coding VLSI can be realized. The architecture is capable of high-speed, real-time encoding not only for still images but also for full-motion pictures using a circuit size. The compression ratios are 2-5 times higher, and the code processing time is 10 times faster than those of conventional fractal techniques. The adoption of the proposed VLSI architecture technique achieves real-time encoding of full-motion videos, and the circuit size of VLSI is much smaller than previously proposed fractal processors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract: This paper proposes an efficient VLSI architecture of fractal image coding for moving pictures. The proposed processor makes use of parallel searching for similar domain blocks by grouping range blocks by identical classes. Furthermore, to encode a moving picture at high-speed, utilizing the domain block information obtained in the coding of a previous frame to code the following frame is employed. According to this architecture, a smaller fractal image coding VLSI can be realized. The architecture is capable of high-speed, real-time encoding not only for still images but also for full-motion pictures using a circuit size. The compression ratios are 2-5 times higher, and the code processing time is 10 times faster than those of conventional fractal techniques. The adoption of the proposed VLSI architecture technique achieves real-time encoding of full-motion videos, and the circuit size of VLSI is much smaller than previously proposed fractal processors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract: This paper proposes an efficient VLSI architecture of fractal image coding for moving pictures. The proposed processor makes use of parallel searching for similar domain blocks by grouping range blocks by identical classes. Furthermore, to encode a moving picture at high-speed, utilizing the domain block information obtained in the coding of a previous frame to code the following frame is employed. According to this architecture, a smaller fractal image coding VLSI can be realized. The architecture is capable of high-speed, real-time encoding not only for still images but also for full-motion pictures using a circuit size. The compression ratios are 2-5 times higher, and the code processing time is 10 times faster than those of conventional fractal techniques. The adoption of the proposed VLSI architecture technique achieves real-time encoding of full-motion videos, and the circuit size of VLSI is much smaller than previously proposed fractal processors.",
"fno": "12360400",
"keywords": [],
"authors": [
{
"affiliation": "Osaka University and SANYO Electric Co., Ltd.",
"fullName": "Hideki Yamauchi",
"givenName": "Hideki",
"surname": "Yamauchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University",
"fullName": "Yoshinori Takeuchi",
"givenName": "Yoshinori",
"surname": "Takeuchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University",
"fullName": "Masaharu Imai",
"givenName": "Masaharu",
"surname": "Imai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "euromicro",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-09-01T00:00:00",
"pubType": "proceedings",
"pages": "0400",
"year": "2001",
"issn": null,
"isbn": "0-7695-1236-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "12360392",
"articleId": "12OmNzYwcfS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "12360410",
"articleId": "12OmNzcxYUB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyqRn6E",
"title": "Proceedings The International Conference on Application Specific Array Processors",
"acronym": "asap",
"groupId": "1000036",
"volume": "0",
"displayVolume": "0",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTJIE2",
"doi": "10.1109/ASAP.1995.522923",
"title": "MOVIE: A Building Block for the Design of Real Time Simulator of Moving Pictures Compression Algorithms",
"normalizedTitle": "MOVIE: A Building Block for the Design of Real Time Simulator of Moving Pictures Compression Algorithms",
"abstract": "This paper shows how real-time simulator of moving pictures compression algorithms can be rapidly assembled using a basic building block, here called MOVIE (MOdule for VIdeo Experimentation). The internal architecture of the MOVIE VLSI chip can be compared to a small systolic machine made of a 32-bit I/O processor, a reduced linear array of 16-bit computation processors and data video input/output mechanisms. Externally, the chip is provided with four 16-bit bidirectional data ports and three 16-bit bidirectional data video port. Several MOVIE chips can be easily clustered to allow the size of the linear array of computation processors to be increased. The MOVIE chip is fully programmable in a high level language in order to make program developments easier.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper shows how real-time simulator of moving pictures compression algorithms can be rapidly assembled using a basic building block, here called MOVIE (MOdule for VIdeo Experimentation). The internal architecture of the MOVIE VLSI chip can be compared to a small systolic machine made of a 32-bit I/O processor, a reduced linear array of 16-bit computation processors and data video input/output mechanisms. Externally, the chip is provided with four 16-bit bidirectional data ports and three 16-bit bidirectional data video port. Several MOVIE chips can be easily clustered to allow the size of the linear array of computation processors to be increased. The MOVIE chip is fully programmable in a high level language in order to make program developments easier.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper shows how real-time simulator of moving pictures compression algorithms can be rapidly assembled using a basic building block, here called MOVIE (MOdule for VIdeo Experimentation). The internal architecture of the MOVIE VLSI chip can be compared to a small systolic machine made of a 32-bit I/O processor, a reduced linear array of 16-bit computation processors and data video input/output mechanisms. Externally, the chip is provided with four 16-bit bidirectional data ports and three 16-bit bidirectional data video port. Several MOVIE chips can be easily clustered to allow the size of the linear array of computation processors to be increased. The MOVIE chip is fully programmable in a high level language in order to make program developments easier.",
"fno": "71090193",
"keywords": [
"Image Compression",
"Special Purpose Architecture",
"Systolic Architecture",
"Real Time Simulation"
],
"authors": [
{
"affiliation": "CCETT",
"fullName": "Ronan Barzic",
"givenName": "Ronan",
"surname": "Barzic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CCETT",
"fullName": "Christian Bouville",
"givenName": "Christian",
"surname": "Bouville",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRISA,Campus de Beaulieu",
"fullName": "Francois Charot",
"givenName": "Francois",
"surname": "Charot",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRISA,Campus de Beaulieu",
"fullName": "Gwendal Le Fol",
"givenName": "Gwendal Le",
"surname": "Fol",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRISA,Campus de Beaulieu",
"fullName": "Pascal Lemonnier",
"givenName": "Pascal",
"surname": "Lemonnier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRISA,Campus de Beaulieu",
"fullName": "Charles Wagner",
"givenName": "Charles",
"surname": "Wagner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "asap",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-07-01T00:00:00",
"pubType": "proceedings",
"pages": "193",
"year": "1995",
"issn": "1063-6862",
"isbn": "0-8186-7109-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "71090182",
"articleId": "12OmNBdruaq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "71090204",
"articleId": "12OmNvqW6Yx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx7ouUM",
"title": "2013 International Conference on Computational and Information Sciences",
"acronym": "iccis",
"groupId": "1800262",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvDI3W7",
"doi": "10.1109/ICCIS.2013.349",
"title": "Underwater Moving Object Detection Based on Codebook Model",
"normalizedTitle": "Underwater Moving Object Detection Based on Codebook Model",
"abstract": "Underwater video intelligent monitoring system extracts moving objects in a complex and dynamic scene with the impact of wave, illumination changes and reflection, while needs real-time character. To solve these situations, underwater moving object detection algorithm based on codebook model was presented. The algorithm creates a background codebook for each pixel according to color distortion and brightness distortion, and then extracts the moving objects using the current pixel to match with code words in the codebook. To adapt the complex changes of scene, the algorithm updates adaptively the background codebook model using the sample value of pixel of current image. The experiments show that the algorithm can extract the complete moving objects by overcoming the impact of underwater environment and inhibiting noise. And the algorithm has perfect real-time character.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Underwater video intelligent monitoring system extracts moving objects in a complex and dynamic scene with the impact of wave, illumination changes and reflection, while needs real-time character. To solve these situations, underwater moving object detection algorithm based on codebook model was presented. The algorithm creates a background codebook for each pixel according to color distortion and brightness distortion, and then extracts the moving objects using the current pixel to match with code words in the codebook. To adapt the complex changes of scene, the algorithm updates adaptively the background codebook model using the sample value of pixel of current image. The experiments show that the algorithm can extract the complete moving objects by overcoming the impact of underwater environment and inhibiting noise. And the algorithm has perfect real-time character.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Underwater video intelligent monitoring system extracts moving objects in a complex and dynamic scene with the impact of wave, illumination changes and reflection, while needs real-time character. To solve these situations, underwater moving object detection algorithm based on codebook model was presented. The algorithm creates a background codebook for each pixel according to color distortion and brightness distortion, and then extracts the moving objects using the current pixel to match with code words in the codebook. To adapt the complex changes of scene, the algorithm updates adaptively the background codebook model using the sample value of pixel of current image. The experiments show that the algorithm can extract the complete moving objects by overcoming the impact of underwater environment and inhibiting noise. And the algorithm has perfect real-time character.",
"fno": "5004b319",
"keywords": [
"Brightness",
"Adaptation Models",
"Real Time Systems",
"Image Color Analysis",
"Training",
"Noise",
"Object Detection",
"Real Time",
"Underwater",
"Moving Object Detection",
"Codebook Model",
"Color Distortion",
"Brightness Distortion"
],
"authors": [
{
"affiliation": null,
"fullName": "Lei Fei",
"givenName": "Lei",
"surname": "Fei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huang Wen-lu",
"givenName": "Huang",
"surname": "Wen-lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhang Ze",
"givenName": "Zhang",
"surname": "Ze",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1319-1322",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5004-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5004b315",
"articleId": "12OmNyNQSAY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5004b323",
"articleId": "12OmNyLiuBe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cis/2014/7434/0/7434a704",
"title": "A Codebook Based Background Subtraction Method for Image Defects Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2014/7434a704/12OmNAFWOR2",
"parentPublication": {
"id": "proceedings/cis/2014/7434/0",
"title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2014/7978/0/7978a119",
"title": "Foreground-Background Segmentation Based on Codebook and Edge Detector",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2014/7978a119/12OmNAJ4piS",
"parentPublication": {
"id": "proceedings/sitis/2014/7978/0",
"title": "2014 Tenth International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2015/8221/0/8221a109",
"title": "Moving Object Detection in Tennis Video",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2015/8221a109/12OmNAoDhR2",
"parentPublication": {
"id": "proceedings/icinis/2015/8221/0",
"title": "2015 8th International Conference on Intelligent Networks and Intelligent Systems (ICINIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chinacom/2015/8795/0/07498049",
"title": "Hierarchical codebook for moving microbe detection in sewage",
"doi": null,
"abstractUrl": "/proceedings-article/chinacom/2015/07498049/12OmNqHqSwK",
"parentPublication": {
"id": "proceedings/chinacom/2015/8795/0",
"title": "2015 10th International Conference on Communications and Networking in China (ChinaCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccet/2009/3521/2/3521b230",
"title": "Speedy Detection Algorithm of Underwater Moving Targets Based on Image Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/iccet/2009/3521b230/12OmNrkBwGD",
"parentPublication": {
"id": "proceedings/iccet/2009/3521/1",
"title": "Computer Engineering and Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2010/7779/0/05575857",
"title": "Color Comparison in Codebook Model for Moving Objects Detection",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05575857/12OmNxETahI",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icia/2006/0528/0/04097984",
"title": "A Codebook-based Video Moving Objects Detecting Method",
"doi": null,
"abstractUrl": "/proceedings-article/icia/2006/04097984/12OmNxymoaL",
"parentPublication": {
"id": "proceedings/icia/2006/0528/0",
"title": "2006 International Conference on Information Acquisition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2015/9721/0/9721a110",
"title": "An Adaptive Codebook Model for Change Detection with Dynamic Background",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2015/9721a110/12OmNzTH0Rc",
"parentPublication": {
"id": "proceedings/sitis/2015/9721/0",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icipmc/2022/6872/0/687200a017",
"title": "Underwater Object Detection Based on Enhanced YOLO",
"doi": null,
"abstractUrl": "/proceedings-article/icipmc/2022/687200a017/1GIuobNeZEs",
"parentPublication": {
"id": "proceedings/icipmc/2022/6872/0",
"title": "2022 International Conference on Image Processing and Media Computing (ICIPMC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiipcc/2022/6287/0/628700a108",
"title": "Underwater salient object detection based on frequency-tuned",
"doi": null,
"abstractUrl": "/proceedings-article/aiipcc/2022/628700a108/1LR9VRgMTiE",
"parentPublication": {
"id": "proceedings/aiipcc/2022/6287/0",
"title": "2022 International Conference on Artificial Intelligence, Information Processing and Cloud Computing (AIIPCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvzJG4b",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "2",
"displayVolume": "2",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwp74sZ",
"doi": "10.1109/ICME.2004.1394323",
"title": "Mosaic based view enlargement for moving objects in moving pictures",
"normalizedTitle": "Mosaic based view enlargement for moving objects in moving pictures",
"abstract": "Conventional mosaicing techniques convert a video from frame-based representation to scene-based representation, but they usually lack dynamic information so that their mosaic is not complete. In this paper, we present a novel method to detect moving objects in the video sequences, then add them into the static background mosaic to represent the scene completely. This novel algorithm separates static and dynamic information in a video sequence, builds the background mosaic from static part and reconstructs moving objects on the static mosaic. We have implemented our techniques and the experimental results demonstrate the effectiveness of our approach",
"abstracts": [
{
"abstractType": "Regular",
"content": "Conventional mosaicing techniques convert a video from frame-based representation to scene-based representation, but they usually lack dynamic information so that their mosaic is not complete. In this paper, we present a novel method to detect moving objects in the video sequences, then add them into the static background mosaic to represent the scene completely. This novel algorithm separates static and dynamic information in a video sequence, builds the background mosaic from static part and reconstructs moving objects on the static mosaic. We have implemented our techniques and the experimental results demonstrate the effectiveness of our approach",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Conventional mosaicing techniques convert a video from frame-based representation to scene-based representation, but they usually lack dynamic information so that their mosaic is not complete. In this paper, we present a novel method to detect moving objects in the video sequences, then add them into the static background mosaic to represent the scene completely. This novel algorithm separates static and dynamic information in a video sequence, builds the background mosaic from static part and reconstructs moving objects on the static mosaic. We have implemented our techniques and the experimental results demonstrate the effectiveness of our approach",
"fno": "01394323",
"keywords": [
"Image Motion Analysis",
"Image Representation",
"Image Segmentation",
"Image Sequences",
"Natural Scenes",
"Video Signal Processing",
"Mosaic Based View Enlargement",
"Moving Objects",
"Moving Pictures",
"Mosaicing Techniques",
"Video Frame Based Representation Conversion",
"Video Scene Based Representation",
"Video Sequences",
"Static Background Mosaic",
"Dynamic Information",
"Static Information",
"Moving Object Reconstruction",
"Layout",
"Object Detection",
"Video Sequences",
"Image Reconstruction",
"Computer Science",
"Video Coding",
"Motion Detection",
"Focusing",
"Motion Estimation",
"Image Coding"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Nat. Univ. of Singapore, Singapore",
"fullName": "Hui Shen",
"givenName": null,
"surname": "Hui Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Nat. Univ. of Singapore, Singapore",
"fullName": "M.S. Kankanhalli",
"givenName": "M.S.",
"surname": "Kankanhalli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Nat. Univ. of Singapore, Singapore",
"fullName": "S.H. Srinivasan",
"givenName": "S.H.",
"surname": "Srinivasan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Nat. Univ. of Singapore, Singapore",
"fullName": "Wei-Qi Yan",
"givenName": null,
"surname": "Wei-Qi Yan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-01-01T00:00:00",
"pubType": "proceedings",
"pages": "807,808,809,810",
"year": "2004",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01394322",
"articleId": "12OmNvAAtAU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01394324",
"articleId": "12OmNvAAtwk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/csie/2009/3507/6/3507f349",
"title": "Fast Video Mosaic Construction for Observation of Large Static Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507f349/12OmNAZOJUH",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2005/2479/0/24790198",
"title": "Content-Based 3D Mosaic Representation for Video of Dynamic 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2005/24790198/12OmNBfZSmR",
"parentPublication": {
"id": "proceedings/aipr/2005/2479/0",
"title": "34th Applied Imagery and Pattern Recognition Workshop (AIPR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761470",
"title": "Synthesizing 3D videos by a motion-conditioned background mosaic",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761470/12OmNC8uRjD",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011974",
"title": "A video completion method based on bandlet transform",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011974/12OmNCxbXAK",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1999/0149/1/01491002",
"title": "A Projective Framework for Scene Segmentation in the Presence of Moving Objects",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1999/01491002/12OmNqHqSzk",
"parentPublication": {
"id": "proceedings/cvpr/1999/0149/2",
"title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2000/0662/2/06622160",
"title": "Scene Modeling for Wide Area Surveillance and Image Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2000/06622160/12OmNqI04K5",
"parentPublication": {
"id": "proceedings/cvpr/2000/0662/2",
"title": "Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No.PR00662)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2012/4789/0/4789a481",
"title": "A Simple and Fast Moving Object Segmentation Based on H.264 Compressed Domain Information",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a481/12OmNx7XHaZ",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2001/1007/0/10070363",
"title": "Mosaic and Warping for Forward Moving Images",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2001/10070363/12OmNyS6RPl",
"parentPublication": {
"id": "proceedings/cgi/2001/1007/0",
"title": "Proceedings. Computer Graphics International 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/psivt/2010/4285/0/4285a456",
"title": "Moving Objects Detection and Tracking Framework for UAV-based Surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/psivt/2010/4285a456/12OmNyrqzxi",
"parentPublication": {
"id": "proceedings/psivt/2010/4285/0",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420605",
"title": "Mosaic based representations of video sequences and their applications",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420605/12OmNzvQHXV",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwbcJ4F",
"title": "Digital Image Computing: Techniques and Applications (DICTA'05)",
"acronym": "dicta",
"groupId": "1001512",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzxgHFH",
"doi": "10.1109/DICTA.2005.25",
"title": "Correcting Flicker in Color Motion Pictures",
"normalizedTitle": "Correcting Flicker in Color Motion Pictures",
"abstract": "Despite active research work in the field of flicker correction, all the published work to date only deals with black and white motion pictures. In this paper, we will take flicker correction into the color space and propose a method to solve the flicker correction problem in color motion pictures. The method is based on a simplified flicker parameter model developed by us in [5] which was originally used for flicker correction in black and white motion pictures. Test results show that the proposed method is very effective in correcting flicker-damaged color films.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite active research work in the field of flicker correction, all the published work to date only deals with black and white motion pictures. In this paper, we will take flicker correction into the color space and propose a method to solve the flicker correction problem in color motion pictures. The method is based on a simplified flicker parameter model developed by us in [5] which was originally used for flicker correction in black and white motion pictures. Test results show that the proposed method is very effective in correcting flicker-damaged color films.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite active research work in the field of flicker correction, all the published work to date only deals with black and white motion pictures. In this paper, we will take flicker correction into the color space and propose a method to solve the flicker correction problem in color motion pictures. The method is based on a simplified flicker parameter model developed by us in [5] which was originally used for flicker correction in black and white motion pictures. Test results show that the proposed method is very effective in correcting flicker-damaged color films.",
"fno": "24670040",
"keywords": [
"Color",
"Motion Pictures",
"Equations",
"1 F Noise",
"Space Technology",
"Testing",
"Parameter Estimation",
"Image Restoration",
"Digital Images",
"Computer Applications"
],
"authors": [
{
"affiliation": "Nanyang Technological University",
"fullName": "K.K. Wong",
"givenName": "K.K.",
"surname": "Wong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "A. Das",
"givenName": "A.",
"surname": "Das",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M.N. Chong",
"givenName": "M.N.",
"surname": "Chong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dicta",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-12-01T00:00:00",
"pubType": "proceedings",
"pages": "40-40",
"year": "2005",
"issn": null,
"isbn": "0-7695-2467-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "24670039",
"articleId": "12OmNBpVPUY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "24670041",
"articleId": "12OmNwHyZWl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iihmsp/2006/2745/0/04041674",
"title": "Dual-Plane Watermarking for Color Pictures Immune to Rotation, Scale, Translation, and Random Bending",
"doi": null,
"abstractUrl": "/proceedings-article/iihmsp/2006/04041674/12OmNAS9zKd",
"parentPublication": {
"id": "proceedings/iihmsp/2006/2745/0",
"title": "2006 International Conference on Intelligent Information Hiding and Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410504",
"title": "Improved flicker removal through motion vectors compensation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410504/12OmNqBbHS1",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2014/5921/0/07041934",
"title": "Representing pictures with sound",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2014/07041934/12OmNxFJXMD",
"parentPublication": {
"id": "proceedings/aipr/2014/5921/0",
"title": "2014 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2010/6425/0/05453512",
"title": "Two-Step Coding for High Definition Video Compression",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2010/05453512/12OmNxFJXNl",
"parentPublication": {
"id": "proceedings/dcc/2010/6425/0",
"title": "2010 Data Compression Conference (DCC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459167",
"title": "Flicker sensitive motion tuned video quality assessment",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459167/12OmNxw5BpL",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2009/3651/0/3651a001",
"title": "Optical Flow from Motion Blurred Color Images",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2009/3651a001/12OmNySosIy",
"parentPublication": {
"id": "proceedings/crv/2009/3651/0",
"title": "2009 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1992/0532/3/00226178",
"title": "Motion compensated frame rate conversion of motion pictures",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1992/00226178/12OmNyen1nQ",
"parentPublication": {
"id": "proceedings/icassp/1992/0532/3",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itcs/2009/3688/2/3688b402",
"title": "The Correction of Intensity Flicker in Archived Film",
"doi": null,
"abstractUrl": "/proceedings-article/itcs/2009/3688b402/12OmNyoiZ7I",
"parentPublication": {
"id": "proceedings/itcs/2009/3688/2",
"title": "Information Technology and Computer Science, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2017/1235/0/08457964",
"title": "Super-Resolution for Color Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2017/08457964/13xI8B2zWrM",
"parentPublication": {
"id": "proceedings/aipr/2017/1235/0",
"title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2006/2745/0/04041674",
"title": "Dual-Plane Watermarking for Color Pictures Immune to Rotation, Scale, Translation, and Random Bending",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2006/04041674/17D45WaTkjc",
"parentPublication": {
"id": "proceedings/iih-msp/2006/2745/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19JE7TFMLdK",
"title": "2018 9th International Symposium on Parallel Architectures, Algorithms and Programming (PAAP)",
"acronym": "paap",
"groupId": "1800289",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19JE8RrLDs4",
"doi": "10.1109/PAAP.2018.00040",
"title": "A Post-Processing Approach in Moving Objects Detection via Feature Pyramid Networks",
"normalizedTitle": "A Post-Processing Approach in Moving Objects Detection via Feature Pyramid Networks",
"abstract": "Recent work has shown that Convolutional Neural Networks (CNNs) have great ability to deal with classification problems in pattern recognition field. Moving objects detection, regarding as a classification process, labels every pixel as a foreground pixel or a background pixel. In this paper, we proposed an effective post-processing approach, Residual Background Networks (ResBGNets), to improve the accuracy of moving objects detection in video sequences. Instead of learning the ground truth directly, our model learns the residual pictures between the results of existing methods and the ground truth. It benefits to understand the hidden character of each algorithm and correct the misclassification. Inside ResBGNets, we build Feature Pyramid Networks (FPN) to combine spatial information of the low-resolution level with semantical features of high-level of the high-resolution level. Evaluation performed on the 2014 CDnet dataset reveals that through our approach, most of the existing background subtraction methods can get better detection results and a significant higher FM score.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent work has shown that Convolutional Neural Networks (CNNs) have great ability to deal with classification problems in pattern recognition field. Moving objects detection, regarding as a classification process, labels every pixel as a foreground pixel or a background pixel. In this paper, we proposed an effective post-processing approach, Residual Background Networks (ResBGNets), to improve the accuracy of moving objects detection in video sequences. Instead of learning the ground truth directly, our model learns the residual pictures between the results of existing methods and the ground truth. It benefits to understand the hidden character of each algorithm and correct the misclassification. Inside ResBGNets, we build Feature Pyramid Networks (FPN) to combine spatial information of the low-resolution level with semantical features of high-level of the high-resolution level. Evaluation performed on the 2014 CDnet dataset reveals that through our approach, most of the existing background subtraction methods can get better detection results and a significant higher FM score.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent work has shown that Convolutional Neural Networks (CNNs) have great ability to deal with classification problems in pattern recognition field. Moving objects detection, regarding as a classification process, labels every pixel as a foreground pixel or a background pixel. In this paper, we proposed an effective post-processing approach, Residual Background Networks (ResBGNets), to improve the accuracy of moving objects detection in video sequences. Instead of learning the ground truth directly, our model learns the residual pictures between the results of existing methods and the ground truth. It benefits to understand the hidden character of each algorithm and correct the misclassification. Inside ResBGNets, we build Feature Pyramid Networks (FPN) to combine spatial information of the low-resolution level with semantical features of high-level of the high-resolution level. Evaluation performed on the 2014 CDnet dataset reveals that through our approach, most of the existing background subtraction methods can get better detection results and a significant higher FM score.",
"fno": "940300a191",
"keywords": [
"Feature Extraction",
"Object Detection",
"Adaptation Models",
"Convolutional Neural Networks",
"Video Sequences",
"Image Color Analysis",
"Training",
"Moving Objects Detection",
"Convolutional Neural Networks",
"Background Subtraction",
"Residual Pictures",
"Feature Pyramids"
],
"authors": [
{
"affiliation": null,
"fullName": "Li Lin",
"givenName": "Li",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bin Wang",
"givenName": "Bin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yinjuan Gu",
"givenName": "Yinjuan",
"surname": "Gu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "paap",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "191-195",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9403-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "940300a183",
"articleId": "19JEajRia88",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "940300a196",
"articleId": "19JE9IY0s0g",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icndc/2012/4832/0/06386674",
"title": "An Effective Background Reconstruction Method for Video Objects Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icndc/2012/06386674/12OmNAXxX86",
"parentPublication": {
"id": "proceedings/icndc/2012/4832/0",
"title": "2012 Third International Conference on Networking and Distributed Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/2/01394323",
"title": "Mosaic based view enlargement for moving objects in moving pictures",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394323/12OmNwp74sZ",
"parentPublication": {
"id": "proceedings/icme/2004/8603/2",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06238924",
"title": "Background subtraction: Experiments and improvements for ViBe",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06238924/12OmNxw5Bjv",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icia/2006/0528/0/04097984",
"title": "A Codebook-based Video Moving Objects Detecting Method",
"doi": null,
"abstractUrl": "/proceedings-article/icia/2006/04097984/12OmNxymoaL",
"parentPublication": {
"id": "proceedings/icia/2006/0528/0",
"title": "2006 International Conference on Information Acquisition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a414",
"title": "Flexible Background Subtraction with Self-Balanced Local Sensitivity",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a414/12OmNzkuKJg",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2003/10/i1337",
"title": "Detecting Moving Objects, Ghosts, and Shadows in Video Streams",
"doi": null,
"abstractUrl": "/journal/tp/2003/10/i1337/13rRUwInuXj",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545597",
"title": "Multi-scale Recurrent Encoder-Decoder Network for Dense Temporal Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545597/17D45X7VTga",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2020/7083/0/708300a016",
"title": "An Improved Moving Target Detection Algorithm Based on Vibe",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2020/708300a016/1oCn2bhTMCk",
"parentPublication": {
"id": "proceedings/iccnea/2020/7083/0",
"title": "2020 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichci/2020/2316/0/231600a254",
"title": "Collaborative Gaussian mixture model for background subtraction",
"doi": null,
"abstractUrl": "/proceedings-article/ichci/2020/231600a254/1tuAaFuZ4n6",
"parentPublication": {
"id": "proceedings/ichci/2020/2316/0",
"title": "2020 International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100d892",
"title": "JanusNet: Detection of Moving Objects from UAV Platforms",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100d892/1yNhKNB2avS",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1rCg5NWvMis",
"title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"acronym": "icaice",
"groupId": "1840544",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rCgbo4UUk8",
"doi": "10.1109/ICAICE51518.2020.00077",
"title": "Video Image Fire Recognition Based on Color Space and Moving Object Detection",
"normalizedTitle": "Video Image Fire Recognition Based on Color Space and Moving Object Detection",
"abstract": "Flame recognition based on video image is an important method for fire detection. In order to improve the accuracy of flame recognition and the applicability of complex scenes, the flame color model was improved on the basis of RGB and HSI color space models, and the flame color model with adaptive threshold values was proposed for different background spaces, which could be adapted to the extraction of suspected flame areas in different environments. The flame has motion characteristics during combustion. ViBe(Visual Background Extractor) algorithm can quickly identify moving objects, but it cannot detect moving objects quickly when the first frame of the image contains moving objects. In this paper, an improved ViBe algorithm is proposed. Frame difference method is used to build the background model through the difference of the first two frames. The method of combining three frame difference and VIBE algorithm can reduce the influence of noise. The hole in the target graph is solved through image morphology processing. Experiments show that the algorithm can identify the flame region accurately and quickly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Flame recognition based on video image is an important method for fire detection. In order to improve the accuracy of flame recognition and the applicability of complex scenes, the flame color model was improved on the basis of RGB and HSI color space models, and the flame color model with adaptive threshold values was proposed for different background spaces, which could be adapted to the extraction of suspected flame areas in different environments. The flame has motion characteristics during combustion. ViBe(Visual Background Extractor) algorithm can quickly identify moving objects, but it cannot detect moving objects quickly when the first frame of the image contains moving objects. In this paper, an improved ViBe algorithm is proposed. Frame difference method is used to build the background model through the difference of the first two frames. The method of combining three frame difference and VIBE algorithm can reduce the influence of noise. The hole in the target graph is solved through image morphology processing. Experiments show that the algorithm can identify the flame region accurately and quickly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Flame recognition based on video image is an important method for fire detection. In order to improve the accuracy of flame recognition and the applicability of complex scenes, the flame color model was improved on the basis of RGB and HSI color space models, and the flame color model with adaptive threshold values was proposed for different background spaces, which could be adapted to the extraction of suspected flame areas in different environments. The flame has motion characteristics during combustion. ViBe(Visual Background Extractor) algorithm can quickly identify moving objects, but it cannot detect moving objects quickly when the first frame of the image contains moving objects. In this paper, an improved ViBe algorithm is proposed. Frame difference method is used to build the background model through the difference of the first two frames. The method of combining three frame difference and VIBE algorithm can reduce the influence of noise. The hole in the target graph is solved through image morphology processing. Experiments show that the algorithm can identify the flame region accurately and quickly.",
"fno": "914600a367",
"keywords": [
"Feature Extraction",
"Fires",
"Flames",
"Image Colour Analysis",
"Image Motion Analysis",
"Image Recognition",
"Image Segmentation",
"Object Detection",
"Video Signal Processing",
"Image Morphology Processing",
"Flame Region",
"Video Image Fire Recognition",
"Color Space",
"Moving Object Detection",
"Flame Recognition",
"Fire Detection",
"Flame Color Model",
"Adaptive Threshold Values",
"Different Background Spaces",
"Suspected Flame Areas",
"Improved Vi Be Algorithm",
"Frame Difference Method",
"Background Model",
"Adaptation Models",
"Image Recognition",
"Image Color Analysis",
"Target Recognition",
"Fires",
"Morphology",
"Object Detection",
"Flame Detection",
"Color Model",
"Moving Target Detection"
],
"authors": [
{
"affiliation": "Shengli College China University of Petroleum,School of Mechanical and Control Engineering,Dongying,China",
"fullName": "Zhang Qian",
"givenName": "Zhang",
"surname": "Qian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shengli College China University of Petroleum,School of Mechanical and Control Engineering,Dongying,China",
"fullName": "Liu Xiao-jun",
"givenName": "Liu",
"surname": "Xiao-jun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shengli College China University of Petroleum,School of Mechanical and Control Engineering,Dongying,China",
"fullName": "Huang Lei",
"givenName": "Huang",
"surname": "Lei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icaice",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-10-01T00:00:00",
"pubType": "proceedings",
"pages": "367-371",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9146-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "914600a363",
"articleId": "1rCgchUMKGY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "914600a372",
"articleId": "1rCg8jV5KKs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2011/348/0/06012201",
"title": "Multi-sensor fire detection using visual and time-of-flight imaging",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012201/12OmNCw3z7y",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciicii/2016/3575/0/3575a091",
"title": "Moving Object Tracking Method Based on Improved Camshift Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iciicii/2016/3575a091/12OmNwIHoBl",
"parentPublication": {
"id": "proceedings/iciicii/2016/3575/0",
"title": "2016 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2016/3906/0/3906a370",
"title": "A Method of Fire and Smoke Detection Based on Surendra Background and Gray Bitmap Plane Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2016/3906a370/12OmNxbEtMN",
"parentPublication": {
"id": "proceedings/itme/2016/3906/0",
"title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iihmsp/2006/2745/0/04041737",
"title": "A Fire-Alarming Method Based on Video Processing",
"doi": null,
"abstractUrl": "/proceedings-article/iihmsp/2006/04041737/12OmNxiKrVy",
"parentPublication": {
"id": "proceedings/iihmsp/2006/2745/0",
"title": "2006 International Conference on Intelligent Information Hiding and Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945741",
"title": "A fire color mapping-based segmentation: Fire pixel segmentation approach",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945741/12OmNyp9Mlc",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-iscmht/2017/1023/0/08338569",
"title": "Particle filter based moving object tracking with adaptive observation model",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-iscmht/2017/08338569/12OmNzYwc5z",
"parentPublication": {
"id": "proceedings/iciev-iscmht/2017/1023/0",
"title": "2017 6th International Conference on Informatics, Electronics and Vision & 2017 7th International Symposium in Computational Medical and Health Technology (ICIEV-ISCMHT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicc-itoe/2010/5634/0/05439215",
"title": "Large Space Fire Image Processing of Improving Canny Edge Detector Based on Adaptive Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/cicc-itoe/2010/05439215/12OmNzt0IQj",
"parentPublication": {
"id": "proceedings/cicc-itoe/2010/5634/0",
"title": "2010 International Conference on Innovative Computing & Communication and 2010 Asia-Pacific Conference on Information Technology & Ocean Engineering, (CICC-ITOE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2019/4689/0/468900a289",
"title": "Video Based Fire Detection in Photovoltaic System",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2019/468900a289/1h0Fkqea1K8",
"parentPublication": {
"id": "proceedings/icmcce/2019/4689/0",
"title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaaci/2020/9753/0/975300a106",
"title": "Research on Fire Detection and Image Information Processing System Based on Image Processing",
"doi": null,
"abstractUrl": "/proceedings-article/icaaci/2020/975300a106/1rlF3GxRfm8",
"parentPublication": {
"id": "proceedings/icaaci/2020/9753/0",
"title": "2020 International Conference on Advance in Ambient Computing and Intelligence (ICAACI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600a710",
"title": "Image recognition and tracking of flowing sub-flame in downward fire of building insulation materials : A Method based on image morphology, SVM and db-scan algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600a710/1x3kV48RF7i",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzYwcc3",
"doi": "10.1109/VR.2015.7223376",
"title": "I'm There! The influence of virtual reality and mixed reality environments combined with two different navigation methods on presence",
"normalizedTitle": "I'm There! The influence of virtual reality and mixed reality environments combined with two different navigation methods on presence",
"abstract": "For various VR/MR/AR applications, such as virtual usability studies, it is very important that the participants have the feeling that they are really in the environment. This feeling of “being” in a mediated environment is described as presence. Two important factors that influence presence are the level of immersion and the navigation method. We developed two navigation methods to simulate natural walking using a Wii Balance Board and a Kinect Sensor. In this preliminary study we examined the effects of these navigation methods and the level of immersion on the participants' perceived presence in a 2×2 factorial between-subjects study with 32 participants in two different VEs (Powerwall and Mixed-Reality-See-Through-Glasses). The results indicate that reported presence is higher for the Kinect navigation and Powerwall for some facets of presence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For various VR/MR/AR applications, such as virtual usability studies, it is very important that the participants have the feeling that they are really in the environment. This feeling of “being” in a mediated environment is described as presence. Two important factors that influence presence are the level of immersion and the navigation method. We developed two navigation methods to simulate natural walking using a Wii Balance Board and a Kinect Sensor. In this preliminary study we examined the effects of these navigation methods and the level of immersion on the participants' perceived presence in a 2×2 factorial between-subjects study with 32 participants in two different VEs (Powerwall and Mixed-Reality-See-Through-Glasses). The results indicate that reported presence is higher for the Kinect navigation and Powerwall for some facets of presence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For various VR/MR/AR applications, such as virtual usability studies, it is very important that the participants have the feeling that they are really in the environment. This feeling of “being” in a mediated environment is described as presence. Two important factors that influence presence are the level of immersion and the navigation method. We developed two navigation methods to simulate natural walking using a Wii Balance Board and a Kinect Sensor. In this preliminary study we examined the effects of these navigation methods and the level of immersion on the participants' perceived presence in a 2×2 factorial between-subjects study with 32 participants in two different VEs (Powerwall and Mixed-Reality-See-Through-Glasses). The results indicate that reported presence is higher for the Kinect navigation and Powerwall for some facets of presence.",
"fno": "07223376",
"keywords": [
"Navigation",
"Usability",
"Virtual Environments",
"Legged Locomotion",
"Mobile Communication",
"Smart Phones",
"Mixed Reality",
"Presence",
"User Studies",
"3 D Navigation"
],
"authors": [
{
"affiliation": "Institute for Machine Tools and Production Processes, Technische Universität Chemnitz",
"fullName": "Mario Lorenz",
"givenName": "Mario",
"surname": "Lorenz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Austrian Institute of Technology GmbH",
"fullName": "Marc Busch",
"givenName": "Marc",
"surname": "Busch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Laboratory for Manufacturing Systems and Automation, University of Patras",
"fullName": "Loukas Rentzos",
"givenName": "Loukas",
"surname": "Rentzos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Austrian Institute of Technology GmbH",
"fullName": "Manfred Tscheligi",
"givenName": "Manfred",
"surname": "Tscheligi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Machine Tools and Production Processes, Technische Universität Chemnitz",
"fullName": "Philipp Klimant",
"givenName": "Philipp",
"surname": "Klimant",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Austrian Institute of Technology GmbH",
"fullName": "Peter Frohlich",
"givenName": "Peter",
"surname": "Frohlich",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "223-224",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223375",
"articleId": "12OmNvnwVor",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223377",
"articleId": "12OmNCcKQFn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549396",
"title": "Integration of spatial sound in immersive virtual environments an experimental study on effects of spatial sound on presence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549396/12OmNqNG3ev",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131742",
"title": "Methods to reduce cybersickness and enhance presence for in-place navigation techniques",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131742/12OmNyxFKaM",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223357",
"title": "Towards context-sensitive reorientation for real walking in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223357/12OmNzE54AN",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892237",
"title": "Exploring the effect of vibrotactile feedback through the floor on social presence in an immersive virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892237/12OmNzh5z4G",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577177",
"title": "Influence of hearing your steps and environmental sounds in VR while walking",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577177/17D45XoXP3w",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797801",
"title": "Spatial Presence in Real and Remote Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797801/1cJ10uVKWxW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798164",
"title": "Reducing Cybersickness by Geometry Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798164/1cJ1e7ULbji",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090521",
"title": "A Constrained Path Redirection for Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090521/1jIxpAQuq8o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09629264",
"title": "Leaning-Based Interfaces Improve Ground-Based VR Locomotion in Reach-the-Target, Follow-the-Path, and Racing Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09629264/1yXvJdO9qaQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJc05Lu2LS",
"doi": "10.1109/VR51125.2022.00041",
"title": "Design and Evaluation of Travel and Orientation Techniques for Desk VR",
"normalizedTitle": "Design and Evaluation of Travel and Orientation Techniques for Desk VR",
"abstract": "Typical VR interactions can be tiring, including standing up, walking, and mid-air gestures. Such interactions result in decreased comfort and session duration compared with traditional non-VR interfaces, which may, in turn, reduce productivity. Nevertheless, current approaches often neglect this aspect, making the VR experience not as promising as it can be. As we see it, desk VR experiences provide the convenience and comfort of a desktop experience and the benefits of VR immersion, being a good compromise between the overall experience and ergonomics. In this work, we explore navigation techniques targeted at desk VR users, using both controllers and a large multi-touch surface. We address travel and orientation techniques independently, considering only continuous approaches for travel as these are better suited for exploration and both continuous and discrete approaches for orientation. Results revealed advantages for a continuous controller-based travel method and a trend for a dragging-based orientation technique. Also, we identified possible trends towards task focus affecting overall cybersickness symptomatology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Typical VR interactions can be tiring, including standing up, walking, and mid-air gestures. Such interactions result in decreased comfort and session duration compared with traditional non-VR interfaces, which may, in turn, reduce productivity. Nevertheless, current approaches often neglect this aspect, making the VR experience not as promising as it can be. As we see it, desk VR experiences provide the convenience and comfort of a desktop experience and the benefits of VR immersion, being a good compromise between the overall experience and ergonomics. In this work, we explore navigation techniques targeted at desk VR users, using both controllers and a large multi-touch surface. We address travel and orientation techniques independently, considering only continuous approaches for travel as these are better suited for exploration and both continuous and discrete approaches for orientation. Results revealed advantages for a continuous controller-based travel method and a trend for a dragging-based orientation technique. Also, we identified possible trends towards task focus affecting overall cybersickness symptomatology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Typical VR interactions can be tiring, including standing up, walking, and mid-air gestures. Such interactions result in decreased comfort and session duration compared with traditional non-VR interfaces, which may, in turn, reduce productivity. Nevertheless, current approaches often neglect this aspect, making the VR experience not as promising as it can be. As we see it, desk VR experiences provide the convenience and comfort of a desktop experience and the benefits of VR immersion, being a good compromise between the overall experience and ergonomics. In this work, we explore navigation techniques targeted at desk VR users, using both controllers and a large multi-touch surface. We address travel and orientation techniques independently, considering only continuous approaches for travel as these are better suited for exploration and both continuous and discrete approaches for orientation. Results revealed advantages for a continuous controller-based travel method and a trend for a dragging-based orientation technique. Also, we identified possible trends towards task focus affecting overall cybersickness symptomatology.",
"fno": "961700a222",
"keywords": [
"Ergonomics",
"Virtual Reality",
"Typical VR Interactions",
"Mid Air Gestures",
"Decreased Comfort",
"Session Duration",
"Non VR Interfaces",
"VR Experience",
"Desk VR Experiences",
"Desktop Experience",
"VR Immersion",
"Ergonomics",
"Navigation Techniques",
"Desk VR Users",
"Multitouch Surface",
"Orientation Techniques",
"Continuous Controller Based Travel Method",
"Dragging Based Orientation Technique",
"Productivity",
"Legged Locomotion",
"Three Dimensional Displays",
"Navigation",
"Cybersickness",
"Ergonomics",
"Conferences",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Techniques",
"Computing Methodologies X 2014 Computer Graphics X 2014 Graphics Systems And Interfaces X 2014 Virtual Reality"
],
"authors": [
{
"affiliation": "Universidade do Porto,INESC TEC / Faculdade de Engenharia",
"fullName": "Guilherme Amaro",
"givenName": "Guilherme",
"surname": "Amaro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade do Porto,INESC TEC / Faculdade de Engenharia",
"fullName": "Daniel Mendes",
"givenName": "Daniel",
"surname": "Mendes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade do Porto,INESC TEC / Faculdade de Engenharia",
"fullName": "Rui Rodrigues",
"givenName": "Rui",
"surname": "Rodrigues",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "222-231",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJbZWFt5jG",
"name": "pvr202296170-09756776s1-mm_961700a222.zip",
"size": "82.7 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756776s1-mm_961700a222.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "961700a213",
"articleId": "1CJbS2QCX5e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a232",
"articleId": "1CJbNHnU8o0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460027",
"title": "Let your fingers do the walking: A unified approach for efficient short-, medium-, and long-distance travel in VR",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460027/12OmNqAU6Be",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2015/7905/0/7905a882",
"title": "Travel Agency Desk Support System Using Interest Degree",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2015/7905a882/12OmNylboBJ",
"parentPublication": {
"id": "proceedings/aina/2015/7905/0",
"title": "2015 IEEE 29th International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131766",
"title": "A multi-touch finger gesture based low-fatigue VR travel framework",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131766/12OmNzayNeN",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446383",
"title": "Simulated Reference Frame: A Cost-Effective Solution to Improve Spatial Orientation in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446383/13bd1fHrlRE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446300",
"title": "Human Compensation Strategies for Orientation Drifts",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446300/13bd1fdV4lD",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a693",
"title": "Systematic Design Space Exploration of Discrete Virtual Rotations in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a693/1CJbHGJZxeM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a064",
"title": "A Short Description of an Ankle-Actuated Seated VR Locomotion Interface",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a064/1tnXf67lAWs",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523837",
"title": "The Cognitive Loads and Usability of Target-based and Steering-based Travel Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523837/1wpqBIpTeSs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523894",
"title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1e7ULbji",
"doi": "10.1109/VR.2019.8798164",
"title": "Reducing Cybersickness by Geometry Deformation",
"normalizedTitle": "Reducing Cybersickness by Geometry Deformation",
"abstract": "One major and well-known issue that occurs during VR experience is the appearance of cybersickness, which refrains users from accepting VR technologies. The induced cybersickness is due to a self-motion feeling that is produced when users see objects moving in the virtual world. To reduce cybersickness several methods have been proposed in the literature, however they do not guarantee immersion and navigation quality. In this paper, a new method to reduce cybersickness is proposed. The geometric deformation of the virtual model displayed in the peripheral field of view allows reducing the self-motion perceived by the user. Pilot test results show that visually induced self-motion is reduced with a guaranteed immersion quality while the user navigation parameters are kept.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One major and well-known issue that occurs during VR experience is the appearance of cybersickness, which refrains users from accepting VR technologies. The induced cybersickness is due to a self-motion feeling that is produced when users see objects moving in the virtual world. To reduce cybersickness several methods have been proposed in the literature, however they do not guarantee immersion and navigation quality. In this paper, a new method to reduce cybersickness is proposed. The geometric deformation of the virtual model displayed in the peripheral field of view allows reducing the self-motion perceived by the user. Pilot test results show that visually induced self-motion is reduced with a guaranteed immersion quality while the user navigation parameters are kept.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One major and well-known issue that occurs during VR experience is the appearance of cybersickness, which refrains users from accepting VR technologies. The induced cybersickness is due to a self-motion feeling that is produced when users see objects moving in the virtual world. To reduce cybersickness several methods have been proposed in the literature, however they do not guarantee immersion and navigation quality. In this paper, a new method to reduce cybersickness is proposed. The geometric deformation of the virtual model displayed in the peripheral field of view allows reducing the self-motion perceived by the user. Pilot test results show that visually induced self-motion is reduced with a guaranteed immersion quality while the user navigation parameters are kept.",
"fno": "08798164",
"keywords": [
"Human Factors",
"Virtual Reality",
"Self Motion Feeling",
"Virtual World",
"Navigation Quality",
"Geometric Deformation",
"Visually Induced Self Motion",
"User Navigation Parameters",
"Geometry Deformation",
"VR Experience",
"Induced Cybersickness",
"VR Technologies",
"Immersion Quality",
"Navigation",
"Strain",
"Legged Locomotion",
"Observers",
"Lattices",
"Geometry",
"Buildings",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Computing Methodologies",
"Computer Graphics",
"Shape Modeling",
"Mesh Geometry Models"
],
"authors": [
{
"affiliation": "Arts et Métiers, UBFC, HESAM, Institut Image, LISPEN EA 7515, 2 rue Thomas Dumorey, Chalon-sur-Saône, France",
"fullName": "Ruding Lou",
"givenName": "Ruding",
"surname": "Lou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Arts et Métiers, UBFC, HESAM, Institut Image, LISPEN EA 7515, 2 rue Thomas Dumorey, Chalon-sur-Saône, France",
"fullName": "Jean-Rémy Chardonnet",
"givenName": "Jean-Rémy",
"surname": "Chardonnet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1058-1059",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798144",
"articleId": "1cJ0OKIZ8yc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797901",
"articleId": "1cJ13BSrOkU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892348",
"title": "Steering locomotion by vestibular perturbation in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089513",
"title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090495",
"title": "Automatic Detection of Cybersickness from Physiological Signal in a Virtual Roller Coaster Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090495/1jIximIpClq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090510",
"title": "Towards an Immersive Virtual Simulation for Studying Cybersickness during Spatial Knowledge Acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090510/1jIxpN6Ecta",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090494",
"title": "A Deep Learning based Framework for Detecting and Reducing onset of Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090494/1jIxuKp865y",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a400",
"title": "Automatic Detection and Prediction of Cybersickness Severity using Deep Neural Networks from user’s Physiological Signals",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a400/1pyswQ0oYOQ",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a156",
"title": "A new device to restore sensory congruency in virtual reality and to prevent cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a156/1tnWwDLMCAw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a486",
"title": "Visual Techniques to Reduce Cybersickness in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a486/1tnXnofrJRu",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523894",
"title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyQYteY",
"title": "2017 IEEE 13th International Conference on e-Science (e-Science)",
"acronym": "e-science",
"groupId": "1001511",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB8Cj01",
"doi": "10.1109/eScience.2017.49",
"title": "Iterative Design and Evaluation of Regulatory Network Visualisation at Scale",
"normalizedTitle": "Iterative Design and Evaluation of Regulatory Network Visualisation at Scale",
"abstract": "Over the last decade, the development of a range of Next Generation Sequencing (NGS) technologies has led to an enormous increase in the size of the data sets available in molecular biology. The scale of these data presents new challenges for researchers, and visualisation is widely regarded as an essential tool for exploration and detailed analysis of candidate relationships. Inevitably, there are cognitive and technical limits on the information which may usefully be displayed on a particular device, and there may be some tension between the analytical utility of a representation and its coverage of the relationships available within the data. Careful attention must be given to the overall design of the visualisation, and to the channels selected, and these tasks are further complicated if the intent is to support interactive exploration by a number of collocated researchers or inclusion within a collaborative workflow. This paper is concerned with the design of a visualisation for regulatory interactions in bacteria, the complex relationships that exist between a set of proteins and the much larger set of genes whose action they control. Modelling these interactions yields equally complex network diagrams, and even classical hairball representations when visualised. In this work we explore the iterative refinement of an alternative visualisation for data of this kind, moving away from the traditional hairball to a 'field' of smaller structures, the intent being to support effective comparison across many dozens of strains and species rather than the exhaustive documentation of a full set of interactions for the one organism. While the study did not directly compare insights obtained using TRNDiff with those obtained using other tools, formal evaluations have allowed us to settle on an effective set of representations and visual channels, and interactive features to support analysis. \nOur approach has produced a far more effective visualisation of these important data sets, and offers useful lessons for tool developers and insights into the utility of touch devices and larger displays for visual analytics and generation of insight at scale.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Over the last decade, the development of a range of Next Generation Sequencing (NGS) technologies has led to an enormous increase in the size of the data sets available in molecular biology. The scale of these data presents new challenges for researchers, and visualisation is widely regarded as an essential tool for exploration and detailed analysis of candidate relationships. Inevitably, there are cognitive and technical limits on the information which may usefully be displayed on a particular device, and there may be some tension between the analytical utility of a representation and its coverage of the relationships available within the data. Careful attention must be given to the overall design of the visualisation, and to the channels selected, and these tasks are further complicated if the intent is to support interactive exploration by a number of collocated researchers or inclusion within a collaborative workflow. This paper is concerned with the design of a visualisation for regulatory interactions in bacteria, the complex relationships that exist between a set of proteins and the much larger set of genes whose action they control. Modelling these interactions yields equally complex network diagrams, and even classical hairball representations when visualised. In this work we explore the iterative refinement of an alternative visualisation for data of this kind, moving away from the traditional hairball to a 'field' of smaller structures, the intent being to support effective comparison across many dozens of strains and species rather than the exhaustive documentation of a full set of interactions for the one organism. While the study did not directly compare insights obtained using TRNDiff with those obtained using other tools, formal evaluations have allowed us to settle on an effective set of representations and visual channels, and interactive features to support analysis. \nOur approach has produced a far more effective visualisation of these important data sets, and offers useful lessons for tool developers and insights into the utility of touch devices and larger displays for visual analytics and generation of insight at scale.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Over the last decade, the development of a range of Next Generation Sequencing (NGS) technologies has led to an enormous increase in the size of the data sets available in molecular biology. The scale of these data presents new challenges for researchers, and visualisation is widely regarded as an essential tool for exploration and detailed analysis of candidate relationships. Inevitably, there are cognitive and technical limits on the information which may usefully be displayed on a particular device, and there may be some tension between the analytical utility of a representation and its coverage of the relationships available within the data. Careful attention must be given to the overall design of the visualisation, and to the channels selected, and these tasks are further complicated if the intent is to support interactive exploration by a number of collocated researchers or inclusion within a collaborative workflow. This paper is concerned with the design of a visualisation for regulatory interactions in bacteria, the complex relationships that exist between a set of proteins and the much larger set of genes whose action they control. Modelling these interactions yields equally complex network diagrams, and even classical hairball representations when visualised. In this work we explore the iterative refinement of an alternative visualisation for data of this kind, moving away from the traditional hairball to a 'field' of smaller structures, the intent being to support effective comparison across many dozens of strains and species rather than the exhaustive documentation of a full set of interactions for the one organism. While the study did not directly compare insights obtained using TRNDiff with those obtained using other tools, formal evaluations have allowed us to settle on an effective set of representations and visual channels, and interactive features to support analysis. \nOur approach has produced a far more effective visualisation of these important data sets, and offers useful lessons for tool developers and insights into the utility of touch devices and larger displays for visual analytics and generation of insight at scale.",
"fno": "08109154",
"keywords": [
"Biology Computing",
"Data Visualisation",
"Interactive Systems",
"Molecular Biophysics",
"Next Generation Sequencing Technologies",
"NGS",
"Regulatory Network Visualisation Iterative Design",
"Regulatory Network Visualisation Evaluation",
"Interactive Features",
"Formal Evaluations",
"Alternative Visualisation",
"Iterative Refinement",
"Classical Hairball Representations",
"Regulatory Interactions",
"Interactive Exploration",
"Technical Limits",
"Cognitive Limits",
"Molecular Biology",
"Visual Analytics",
"Data Visualization",
"Tools",
"Bioinformatics",
"Organisms",
"Visualization",
"Proteins",
"Strain"
],
"authors": [
{
"affiliation": null,
"fullName": "Samuel Thomas Smith",
"givenName": "Samuel Thomas",
"surname": "Smith",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "James Michael Hogan",
"givenName": "James Michael",
"surname": "Hogan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xin-Yi Chua",
"givenName": "Xin-Yi",
"surname": "Chua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Margot Brereton",
"givenName": "Margot",
"surname": "Brereton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel Johnson",
"givenName": "Daniel",
"surname": "Johnson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Markus Rittenbruch",
"givenName": "Markus",
"surname": "Rittenbruch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "e-science",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "354-363",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2686-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08109153",
"articleId": "12OmNB0X8qi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08109155",
"articleId": "12OmNzd7bWh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/biovis/2012/4729/0/06378593",
"title": "Gene-RiViT: A visualization tool for comparative analysis of gene neighborhoods in prokaryotes",
"doi": null,
"abstractUrl": "/proceedings-article/biovis/2012/06378593/12OmNAQrYEK",
"parentPublication": {
"id": "proceedings/biovis/2012/4729/0",
"title": "2012 IEEE Symposium on Biological Data Visualization (BioVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2010/8306/0/05706538",
"title": "Characterization of structural features for small regulatory RNAs in Escherichia coli genomes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2010/05706538/12OmNBEGYGQ",
"parentPublication": {
"id": "proceedings/bibm/2010/8306/0",
"title": "2010 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2017/1324/0/132401a151",
"title": "Modeling Global and local Codon Bias with Deep Language Models",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2017/132401a151/12OmNvAAtEz",
"parentPublication": {
"id": "proceedings/bibe/2017/1324/0",
"title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571345",
"title": "Polytree Numbering for Citation Networks Visualisation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571345/12OmNvjgWSA",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a133",
"title": "Interaction Visualisation of Complex Genomic Data with Game Engines",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a133/12OmNwvVrCG",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2018/04/07276997",
"title": "IAS: Interaction Specific GO Term Associations for Predicting Protein-Protein Interaction Networks",
"doi": null,
"abstractUrl": "/journal/tb/2018/04/07276997/13rRUx0geox",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2018/8292/0/829200a127",
"title": "The Code Mini-Map Visualisation: Encoding Conceptual Structures Within Source Code",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2018/829200a127/17D45Xtvp8o",
"parentPublication": {
"id": "proceedings/vissoft/2018/8292/0",
"title": "2018 IEEE Working Conference on Software Visualization (VISSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809833",
"title": "OntoPlot: A Novel Visualisation for Non-hierarchical Associations in Large Ontologies",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809833/1cHEinBcEAo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a345",
"title": "The Compound Graph: A Case Study for Community Visualisation in Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a345/1cMFc7n35Ru",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2019/6303/0/09162356",
"title": "Big Data Visualisation - An Update until Today",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2019/09162356/1m6hPcxcycw",
"parentPublication": {
"id": "proceedings/csde/2019/6303/0",
"title": "2019 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqAU6tC",
"title": "2014 25th International Workshop on Database and Expert Systems Applications (DEXA)",
"acronym": "dexa",
"groupId": "1000180",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNroijeS",
"doi": "10.1109/DEXA.2014.19",
"title": "Protein Data Modelling for Concurrent Sequential Patterns",
"normalizedTitle": "Protein Data Modelling for Concurrent Sequential Patterns",
"abstract": "Protein sequences from the same family typically share common patterns which imply their structural function and biological relationship. The challenge of identifying protein motifs is often addressed through mining frequent item sets and sequential patterns, where post-processing is a useful technique. Earlier work has shown that Concurrent Sequential Patterns mining can be applied in bioinformatics, e.g. to detect frequently occurring concurrent protein sub-sequences. This paper presents a companion approach to data modelling and visualisation, applying it to real-world protein datasets from the PROSITE and NCBI databases. The results show the potential for graph-based modelling in representing the integration of higher level patterns common to all or nearly all of the protein sequences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Protein sequences from the same family typically share common patterns which imply their structural function and biological relationship. The challenge of identifying protein motifs is often addressed through mining frequent item sets and sequential patterns, where post-processing is a useful technique. Earlier work has shown that Concurrent Sequential Patterns mining can be applied in bioinformatics, e.g. to detect frequently occurring concurrent protein sub-sequences. This paper presents a companion approach to data modelling and visualisation, applying it to real-world protein datasets from the PROSITE and NCBI databases. The results show the potential for graph-based modelling in representing the integration of higher level patterns common to all or nearly all of the protein sequences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Protein sequences from the same family typically share common patterns which imply their structural function and biological relationship. The challenge of identifying protein motifs is often addressed through mining frequent item sets and sequential patterns, where post-processing is a useful technique. Earlier work has shown that Concurrent Sequential Patterns mining can be applied in bioinformatics, e.g. to detect frequently occurring concurrent protein sub-sequences. This paper presents a companion approach to data modelling and visualisation, applying it to real-world protein datasets from the PROSITE and NCBI databases. The results show the potential for graph-based modelling in representing the integration of higher level patterns common to all or nearly all of the protein sequences.",
"fno": "06974818",
"keywords": [
"Proteins",
"Data Mining",
"Databases",
"Biological System Modeling",
"Data Models",
"Amino Acids",
"Visualisation",
"Protein Sequences",
"Data Mining",
"Concurrent Sequential Patterns Con SP",
"Bioinformatics",
"Con SP Modelling",
"Biological Databases",
"Knowledge Representation"
],
"authors": [
{
"affiliation": null,
"fullName": "Jing Lu",
"givenName": "Jing",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Malcolm Keech",
"givenName": "Malcolm",
"surname": "Keech",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cuiqing Wang",
"givenName": "Cuiqing",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dexa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "5-9",
"year": "2014",
"issn": "1529-4188",
"isbn": "978-1-4799-5721-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06974817",
"articleId": "12OmNvAAtn7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06974819",
"articleId": "12OmNzayNaB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/big-data/2014/5666/0/07004340",
"title": "Stochastic Finite Automata for the translation of DNA to protein",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2014/07004340/12OmNBOlle9",
"parentPublication": {
"id": "proceedings/big-data/2014/5666/0",
"title": "2014 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2013/1309/0/06732455",
"title": "SFAPS: An R package for structure/function analysis of protein sequences based on informational spectrum method",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2013/06732455/12OmNqGRG7L",
"parentPublication": {
"id": "proceedings/bibm/2013/1309/0",
"title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2017/1324/0/132401a334",
"title": "Protein Structure Recognition by Means of Sequential Pattern Mining",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2017/132401a334/12OmNviHKeF",
"parentPublication": {
"id": "proceedings/bibe/2017/1324/0",
"title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2017/1600/0/1600a043",
"title": "Mining Similarity-Aware Distinguishing Sequential Patterns from Biomedical Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2017/1600a043/12OmNwtn3Es",
"parentPublication": {
"id": "proceedings/dsc/2017/1600/0",
"title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2016/1611/0/07822758",
"title": "Prediction and analysis of hot region in protein-protein interactions",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822758/12OmNyoiYV6",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2014/05/06767049",
"title": "Discovery of Spatially Cohesive Itemsets in Three-Dimensional Protein Structures",
"doi": null,
"abstractUrl": "/journal/tb/2014/05/06767049/13rRUwvT9fa",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2017/01/07390043",
"title": "Extracting Coevolutionary Features from Protein Sequences for Predicting Protein-Protein Interactions",
"doi": null,
"abstractUrl": "/journal/tb/2017/01/07390043/13rRUx0xPtV",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2014/03/06746078",
"title": "Aligning and Clustering Patterns to Reveal the Protein Functionality of Sequences",
"doi": null,
"abstractUrl": "/journal/tb/2014/03/06746078/13rRUy0qnKe",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/5555/01/09782576",
"title": "Improved DNA-versus-Protein Homology Search for Protein Fossils",
"doi": null,
"abstractUrl": "/journal/tb/5555/01/09782576/1DGRVVt3UfC",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412055",
"title": "Interpretable Structured Learning with Sparse Gated Sequence Encoder for Protein-Protein Interaction Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412055/1tmk11cXnQ4",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNz4BdvV",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx4gUkE",
"doi": "10.1109/ICMEW.2012.101",
"title": "A Visual Search User Study on the Influences of Aspect Ratio Distortion of Preview Thumbnails",
"normalizedTitle": "A Visual Search User Study on the Influences of Aspect Ratio Distortion of Preview Thumbnails",
"abstract": "Most image and video retrieval tools used for large-scale media collections present query results as thumbnails arranged in a grid-like display with each thumbnail preserving the aspect ratio of its corresponding source image or video. Often, the outcome of a query is a set of thumbnails with different aspect ratios, thus a varying amount of padding space is used between the thumbnails in the display. This results in a visually erratic display that conflicts with interface design rules and aesthetic principles stipulating alignment and the usage of straight visual lines to guide the human eye while scanning the display. A solution is to create equally sized thumbnails by using cropping algorithms. However, this may remove useful search information. We investigated a simple alternative: to distort thumbnails to the same aspect ratio in order to provide a calm and structured display with straight lines between thumbnails. In a user experiment we evaluated whether and how much such a horizontal distortion can be applied without hampering visual search performance. The results show that distortion does not notably influence error rate and visual search time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most image and video retrieval tools used for large-scale media collections present query results as thumbnails arranged in a grid-like display with each thumbnail preserving the aspect ratio of its corresponding source image or video. Often, the outcome of a query is a set of thumbnails with different aspect ratios, thus a varying amount of padding space is used between the thumbnails in the display. This results in a visually erratic display that conflicts with interface design rules and aesthetic principles stipulating alignment and the usage of straight visual lines to guide the human eye while scanning the display. A solution is to create equally sized thumbnails by using cropping algorithms. However, this may remove useful search information. We investigated a simple alternative: to distort thumbnails to the same aspect ratio in order to provide a calm and structured display with straight lines between thumbnails. In a user experiment we evaluated whether and how much such a horizontal distortion can be applied without hampering visual search performance. The results show that distortion does not notably influence error rate and visual search time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most image and video retrieval tools used for large-scale media collections present query results as thumbnails arranged in a grid-like display with each thumbnail preserving the aspect ratio of its corresponding source image or video. Often, the outcome of a query is a set of thumbnails with different aspect ratios, thus a varying amount of padding space is used between the thumbnails in the display. This results in a visually erratic display that conflicts with interface design rules and aesthetic principles stipulating alignment and the usage of straight visual lines to guide the human eye while scanning the display. A solution is to create equally sized thumbnails by using cropping algorithms. However, this may remove useful search information. We investigated a simple alternative: to distort thumbnails to the same aspect ratio in order to provide a calm and structured display with straight lines between thumbnails. In a user experiment we evaluated whether and how much such a horizontal distortion can be applied without hampering visual search performance. The results show that distortion does not notably influence error rate and visual search time.",
"fno": "06266442",
"keywords": [
"Graphical User Interfaces",
"Image Retrieval",
"Video Retrieval",
"Visual Search User Study",
"Large Scale Media Collections",
"Video Retrieval",
"Image Retrieval",
"Cropping Algorithms",
"Horizontal Distortion",
"Layout",
"Visualization",
"Error Analysis",
"Accuracy",
"Software",
"Analysis Of Variance",
"Image Recognition",
"Image Distortion",
"Visual Search",
"Image And Video Retrieval Tools",
"Graphical User Interfaces"
],
"authors": [
{
"affiliation": null,
"fullName": "David Ahlström",
"givenName": "David",
"surname": "Ahlström",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Klaus Schoeffmann",
"givenName": "Klaus",
"surname": "Schoeffmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "546-551",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2027-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06266441",
"articleId": "12OmNroij5o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06266443",
"articleId": "12OmNrkjVoS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/tdscen/1989/2007/0/00068107",
"title": "Computing the orthographic projection aspect graph of solids of revolution",
"doi": null,
"abstractUrl": "/proceedings-article/tdscen/1989/00068107/12OmNAfy7Jz",
"parentPublication": {
"id": "proceedings/tdscen/1989/2007/0",
"title": "Proceedings. Workshop on Interpretation of 3D Scenes",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034c001",
"title": "Integrating Boundary and Center Correlation Filters for Visual Tracking with Aspect Ratio Variation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c001/12OmNAle6UB",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2900/0/29000743",
"title": "A Practical Algorithm for Planar Straight-line Grid Drawings of General Trees with Linear Area and Arbitrary Aspect Ratio",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/29000743/12OmNB1wkOJ",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2006/2630/0/26300032",
"title": "Aspect-Ratio Voronoi Diagram with Applications",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2006/26300032/12OmNBvkdmS",
"parentPublication": {
"id": "proceedings/isvd/2006/2630/0",
"title": "2006 3rd International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a848",
"title": "3D Storyboards for Interactive Visual Search",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a848/12OmNqBtiL3",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460038",
"title": "Curvature manipulation techniques in redirection using haptic cues",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460038/12OmNxTVU2T",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122276",
"title": "Arc Length-Based Aspect Ratio Selection",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122276/13rRUNvyatg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09672037",
"title": "Mi YouTube es Su YouTube? Analyzing the Cultures using YouTube Thumbnails of Popular Videos",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09672037/1A8jqkuRcWc",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600b833",
"title": "Aspect-Ratio-Preserving Multi-Patch Image Aesthetics Score Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600b833/1iTvoL6ZW00",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2682",
"title": "Learning to Learn Cropping Models for Different Aspect Ratio Requirements",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2682/1m3o8QfxwFG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNB8Cj92",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxj23dE",
"doi": "10.1109/ICMEW.2014.6890710",
"title": "An image community detection method for hierarchical visualisation",
"normalizedTitle": "An image community detection method for hierarchical visualisation",
"abstract": "Better ways of representing the results of image search can be found rather than regular lists of thumbnails. For this purpose, we propose a hierarchical visualisation scheme with two stages. We utilise the notion of image community and aim to detect communities within a large set of images by means of a novel deterministic community detection method. After image communities are detected, the representative key images of these communities are presented to the user in an intuitive and expressive layout. The layout is determined according to the detected community structure. As a result, the user is presented a distinctive set of images at the first stage. If similar images are desired, the members of the communities can be explored at the second stage. We experimentally show that the proposed community detection algorithm significantly outperforms generic community detection methods. Furthermore, we believe that the proposed hierarchical visualisation can be preferred by many of the users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Better ways of representing the results of image search can be found rather than regular lists of thumbnails. For this purpose, we propose a hierarchical visualisation scheme with two stages. We utilise the notion of image community and aim to detect communities within a large set of images by means of a novel deterministic community detection method. After image communities are detected, the representative key images of these communities are presented to the user in an intuitive and expressive layout. The layout is determined according to the detected community structure. As a result, the user is presented a distinctive set of images at the first stage. If similar images are desired, the members of the communities can be explored at the second stage. We experimentally show that the proposed community detection algorithm significantly outperforms generic community detection methods. Furthermore, we believe that the proposed hierarchical visualisation can be preferred by many of the users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Better ways of representing the results of image search can be found rather than regular lists of thumbnails. For this purpose, we propose a hierarchical visualisation scheme with two stages. We utilise the notion of image community and aim to detect communities within a large set of images by means of a novel deterministic community detection method. After image communities are detected, the representative key images of these communities are presented to the user in an intuitive and expressive layout. The layout is determined according to the detected community structure. As a result, the user is presented a distinctive set of images at the first stage. If similar images are desired, the members of the communities can be explored at the second stage. We experimentally show that the proposed community detection algorithm significantly outperforms generic community detection methods. Furthermore, we believe that the proposed hierarchical visualisation can be preferred by many of the users.",
"fno": "06890710",
"keywords": [
"Communities",
"Layout",
"Visualization",
"Image Edge Detection",
"Measurement",
"Equations",
"Indexes",
"Hierarchical Visualisation",
"Image Graph",
"Image Community"
],
"authors": [
{
"affiliation": "TUBITAK UZAY, Turkey",
"fullName": "Ersin Esen",
"givenName": "Ersin",
"surname": "Esen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TUBITAK UZAY, Turkey",
"fullName": "Savas Ozkan",
"givenName": "Savas",
"surname": "Ozkan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TUBITAK UZAY, Turkey",
"fullName": "Ilkay Atil",
"givenName": "Ilkay",
"surname": "Atil",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TUBITAK UZAY, Turkey",
"fullName": "Mehmet Ali Arabaci",
"givenName": "Mehmet Ali",
"surname": "Arabaci",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TUBITAK UZAY, Turkey",
"fullName": "Seda Tankiz",
"givenName": "Seda",
"surname": "Tankiz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": "1945-7871",
"isbn": "978-1-4799-4717-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890709",
"articleId": "12OmNqIhFMD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890711",
"articleId": "12OmNzE54y6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/asonam/2014/5877/0/06921686",
"title": "Extension of Modularity Density for overlapping community structure",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2014/06921686/12OmNA1mbdx",
"parentPublication": {
"id": "proceedings/asonam/2014/5877/0",
"title": "2014 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2012/4905/0/4905b170",
"title": "Community-Affiliation Graph Model for Overlapping Network Community Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2012/4905b170/12OmNrJiCXg",
"parentPublication": {
"id": "proceedings/icdm/2012/4905/0",
"title": "2012 IEEE 12th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdata-congress/2014/5057/0/06906847",
"title": "Multifaceted Visualisation of Annotated Social Media Data",
"doi": null,
"abstractUrl": "/proceedings-article/bigdata-congress/2014/06906847/12OmNs0C9L5",
"parentPublication": {
"id": "proceedings/bigdata-congress/2014/5057/0",
"title": "2014 IEEE International Congress on Big Data (BigData Congress)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2014/6513/0/6513a075",
"title": "Multi-dimensions of Developer Trustworthiness Assessment in OSS Community",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2014/6513a075/12OmNwCsdLt",
"parentPublication": {
"id": "proceedings/trustcom/2014/6513/0",
"title": "2014 IEEE 13th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2013/5108/0/5108b151",
"title": "Community Detection in Networks with Node Attributes",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2013/5108b151/12OmNx3Zjq2",
"parentPublication": {
"id": "proceedings/icdm/2013/5108/0",
"title": "2013 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdata-congress/2013/5006/0/06597146",
"title": "Data Abstraction and Visualisation in Next Step: Experiences from a Government Services Delivery Trial",
"doi": null,
"abstractUrl": "/proceedings-article/bigdata-congress/2013/06597146/12OmNxwWoJq",
"parentPublication": {
"id": "proceedings/bigdata-congress/2013/5006/0",
"title": "2013 IEEE International Congress on Big Data (BigData Congress)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a185",
"title": "LPA Based Hierarchical Community Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a185/12OmNz6iOal",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2015/11/07110600",
"title": "Tracking Temporal Community Strength in Dynamic Networks",
"doi": null,
"abstractUrl": "/journal/tk/2015/11/07110600/13rRUx0gevo",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a345",
"title": "The Compound Graph: A Case Study for Community Visualisation in Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a345/1cMFc7n35Ru",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933655",
"title": "Towards Quantifying Multiple View Layouts in Visualisation as Seen from Research Publications",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933655/1fTgHP2omAM",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAXxWQv",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"acronym": "bibm",
"groupId": "1001586",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzsJ7AU",
"doi": "10.1109/BIBM.2016.7822594",
"title": "Identifying protein complexes via multi-network clustering",
"normalizedTitle": "Identifying protein complexes via multi-network clustering",
"abstract": "The detection of protein complexes from protein-protein interaction (PPI) networks is an important step toward understanding the functional organization within cells. A great number of graph clustering algorithms have been proposed to undertake this task. Since PPI data collected by high-throughput technologies is quite noisy, simply applying graph clustering algorithms on PPI data is generally not adequate to achieve reliable prediction results. Behind protein interactions, there are protein domains that interact with each other. Jointly exploiting protein-protein interactions and domain-domain interactions (DDI) have the potential to increase the accuracy of protein complex detection. However, traditional graph clustering algorithms focus on clustering proteins within a single PPI network, and cannot make use of information inherent in other heterogeneous networks. In this paper, we proposed a novel generative model to perform multi-network clustering. Unlike previous protein complex detection algorithms that can only utilize the information within a single PPI network, our model is a flexible framework that can take into account PPIs, DDIs and domain-protein associations to achieve more consistent and reliable clustering results. Experiment results on real data demonstrate that our method performs much better than state-of-the-art protein complex detection techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The detection of protein complexes from protein-protein interaction (PPI) networks is an important step toward understanding the functional organization within cells. A great number of graph clustering algorithms have been proposed to undertake this task. Since PPI data collected by high-throughput technologies is quite noisy, simply applying graph clustering algorithms on PPI data is generally not adequate to achieve reliable prediction results. Behind protein interactions, there are protein domains that interact with each other. Jointly exploiting protein-protein interactions and domain-domain interactions (DDI) have the potential to increase the accuracy of protein complex detection. However, traditional graph clustering algorithms focus on clustering proteins within a single PPI network, and cannot make use of information inherent in other heterogeneous networks. In this paper, we proposed a novel generative model to perform multi-network clustering. Unlike previous protein complex detection algorithms that can only utilize the information within a single PPI network, our model is a flexible framework that can take into account PPIs, DDIs and domain-protein associations to achieve more consistent and reliable clustering results. Experiment results on real data demonstrate that our method performs much better than state-of-the-art protein complex detection techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The detection of protein complexes from protein-protein interaction (PPI) networks is an important step toward understanding the functional organization within cells. A great number of graph clustering algorithms have been proposed to undertake this task. Since PPI data collected by high-throughput technologies is quite noisy, simply applying graph clustering algorithms on PPI data is generally not adequate to achieve reliable prediction results. Behind protein interactions, there are protein domains that interact with each other. Jointly exploiting protein-protein interactions and domain-domain interactions (DDI) have the potential to increase the accuracy of protein complex detection. However, traditional graph clustering algorithms focus on clustering proteins within a single PPI network, and cannot make use of information inherent in other heterogeneous networks. In this paper, we proposed a novel generative model to perform multi-network clustering. Unlike previous protein complex detection algorithms that can only utilize the information within a single PPI network, our model is a flexible framework that can take into account PPIs, DDIs and domain-protein associations to achieve more consistent and reliable clustering results. Experiment results on real data demonstrate that our method performs much better than state-of-the-art protein complex detection techniques.",
"fno": "07822594",
"keywords": [
"Proteins",
"Clustering Algorithms",
"Protein Engineering",
"Mathematical Model",
"Prediction Algorithms",
"Linear Programming",
"Heterogeneous Networks",
"Multi Network Clustering",
"Protein Protein Interaction",
"Domain Domain Interaction",
"Protein Complex"
],
"authors": [
{
"affiliation": "College of Information Engineering, Shenzhen University, 518060, China",
"fullName": "Le Ou-Yang",
"givenName": "Le",
"surname": "Ou-Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electronic Engineering, City University of Hong Kong, Hong Kong",
"fullName": "Hong Yan",
"givenName": null,
"surname": "Hong Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Mathematics and Statistics & Hubei Key Laboratory of Mathematical Sciences, Central China Normal University, Wuhan 430079, China",
"fullName": "Xiao-Fei Zhang",
"givenName": "Xiao-Fei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "645-650",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-1611-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07822593",
"articleId": "12OmNwF0C6m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07822595",
"articleId": "12OmNwBT1qT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2008/3165/0/3165a179",
"title": "myMCL: A Web Portal for Protein Complexes Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2008/3165a179/12OmNBcj5Dc",
"parentPublication": {
"id": "proceedings/cbms/2008/3165/0",
"title": "2008 21st IEEE International Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2016/1611/0/07822611",
"title": "Mining protein complexes based on topology potential from weighted dynamic PPI network",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822611/12OmNCzKlMI",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2011/1799/0/06120506",
"title": "Identifying Protein Complexes from PPI Networks Using GO Semantic Similarity",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120506/12OmNvSKNQ4",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2011/1799/0/06120420",
"title": "An Improved Graph Entropy-based Method for Identifying Protein Complexes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120420/12OmNwCsdAM",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2016/3834/0/3834a021",
"title": "Identifying Protein Complexes Method Based on Time-Sequenced Association and Ant Colony Clustering in Dynamic PPI Networks",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2016/3834a021/12OmNyqzLYC",
"parentPublication": {
"id": "proceedings/bibe/2016/3834/0",
"title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2010/8306/0/05706571",
"title": "Semi-supervised learning protein complexes from protein interaction networks",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2010/05706571/12OmNyuPKVV",
"parentPublication": {
"id": "proceedings/bibm/2010/8306/0",
"title": "2010 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2013/1309/0/06732607",
"title": "A clustering algorithm for identifying hierarchical and overlapping protein complexes in large PPI networks",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2013/06732607/12OmNzFdt2T",
"parentPublication": {
"id": "proceedings/bibm/2013/1309/0",
"title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466500",
"title": "An Improved Prorank Algorithm for Detection of Overlapping Protein Complexes using Protein Interaction and Gene Expression Datasets in Yeast",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466500/13JkraaHIPL",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2014/03/06718040",
"title": "Detecting Protein Complexes Based on Uncertain Graph Model",
"doi": null,
"abstractUrl": "/journal/tb/2014/03/06718040/13rRUwh80t5",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2015/06/07035059",
"title": "Detecting Protein Complexes from Signed Protein-Protein Interaction Networks",
"doi": null,
"abstractUrl": "/journal/tb/2015/06/07035059/13rRUxAATf8",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |