data dict |
|---|
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysoFwQTRe",
"doi": "10.1109/ISMAR-Adjunct.2019.00045",
"title": "Design of Paper Book Oriented Augmented Reality Collaborative Annotation System for Science Education",
"normalizedTitle": "Design of Paper Book Oriented Augmented Reality Collaborative Annotation System for Science Education",
"abstract": "The authors designed a kind of augmented reality annotation system based on network knowledge collaboration for primary science education to expand the cognitive effect. Various types of annotations such as text, images, videos, links, 3D models, etc. can be added to the corresponding position of the paper book by multi-user and multi-device through the system. And the annotation contents could be retrieved in AR mode by other users. The connotation and dimension of scientific knowledge could be expanded through concentrating diversified annotations. The system only records the relative position of the annotation on the page by a hand-aided registration, and uploads the location information to the server without infringing the copyright of the book. The system allows users to add links as annotations, through which users could interact with social media and knowledge communities. By using this system, users' ideas could be connected thus promote the flow of knowledge between different types of readers (such as students, parents, and teachers), readers and authors and it is conducive to the exchange and inspiration of ideas, promoting the integration of knowledge and the generation of group wisdom.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors designed a kind of augmented reality annotation system based on network knowledge collaboration for primary science education to expand the cognitive effect. Various types of annotations such as text, images, videos, links, 3D models, etc. can be added to the corresponding position of the paper book by multi-user and multi-device through the system. And the annotation contents could be retrieved in AR mode by other users. The connotation and dimension of scientific knowledge could be expanded through concentrating diversified annotations. The system only records the relative position of the annotation on the page by a hand-aided registration, and uploads the location information to the server without infringing the copyright of the book. The system allows users to add links as annotations, through which users could interact with social media and knowledge communities. By using this system, users' ideas could be connected thus promote the flow of knowledge between different types of readers (such as students, parents, and teachers), readers and authors and it is conducive to the exchange and inspiration of ideas, promoting the integration of knowledge and the generation of group wisdom.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors designed a kind of augmented reality annotation system based on network knowledge collaboration for primary science education to expand the cognitive effect. Various types of annotations such as text, images, videos, links, 3D models, etc. can be added to the corresponding position of the paper book by multi-user and multi-device through the system. And the annotation contents could be retrieved in AR mode by other users. The connotation and dimension of scientific knowledge could be expanded through concentrating diversified annotations. The system only records the relative position of the annotation on the page by a hand-aided registration, and uploads the location information to the server without infringing the copyright of the book. The system allows users to add links as annotations, through which users could interact with social media and knowledge communities. By using this system, users' ideas could be connected thus promote the flow of knowledge between different types of readers (such as students, parents, and teachers), readers and authors and it is conducive to the exchange and inspiration of ideas, promoting the integration of knowledge and the generation of group wisdom.",
"fno": "476500a417",
"keywords": [
"Augmented Reality",
"Computer Aided Instruction",
"Groupware",
"Human Computer Interaction",
"Natural Sciences Computing",
"Social Networking Online",
"Scientific Knowledge",
"Social Media",
"Knowledge Communities",
"Paper Book Oriented Augmented Reality Collaborative Annotation System",
"Augmented Reality Annotation System",
"Network Knowledge Collaboration",
"Primary Science Education",
"Annotation Content Retrieval",
"Annotations",
"Collaboration",
"Databases",
"Augmented Reality",
"Videos",
"Mobile Handsets",
"Education",
"Collaborative System Knowledge Collaboration Augmented Reality Annotation Science Education"
],
"authors": [
{
"affiliation": "University of Science and Technology of China",
"fullName": "YanXiang Zhang",
"givenName": "YanXiang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Li Tao",
"givenName": "Li",
"surname": "Tao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Yaping Lu",
"givenName": "Yaping",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Ying Li",
"givenName": "Ying",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "417-421",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a412",
"articleId": "1gysjBEtqk8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a422",
"articleId": "1gysmDWuePu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948459",
"title": "[Poster] Utilizing contact-view as an augmented reality authoring method for printed document annotation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948459/12OmNAlvI6d",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892383",
"title": "Gesture-based augmented reality annotation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892383/12OmNwJPMYX",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2000/0795/0/07950183",
"title": "Improvement of Panorama-Based Annotation Overlay Using Omnidirectional Vision and Inertial Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2000/07950183/12OmNzcxZkO",
"parentPublication": {
"id": "proceedings/iswc/2000/0795/0",
"title": "Digest of Papers. Fourth International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2008/3454/0/3454a509",
"title": "Multimedia Semantic Annotation Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2008/3454a509/12OmNzgwmKC",
"parentPublication": {
"id": "proceedings/ism/2008/3454/0",
"title": "2008 Tenth IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2013/2246/0/2246a387",
"title": "Seamless Annotation Display for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a387/12OmNzkuKyK",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09994940",
"title": "TCM-SAS: A Semantic Annotation System and Knowledgebase of Traditional Chinese Medicine",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09994940/1JC2184at1u",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f870",
"title": "Human-in-the-Loop Video Semantic Segmentation Auto-Annotation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f870/1LiO7MVX61O",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scam/2020/9248/0/924800a132",
"title": "Annotation practices in Android apps",
"doi": null,
"abstractUrl": "/proceedings-article/scam/2020/924800a132/1oFGZoPXicU",
"parentPublication": {
"id": "proceedings/scam/2020/9248/0",
"title": "2020 IEEE 20th International Working Conference on Source Code Analysis and Manipulation (SCAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a371",
"title": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a371/1qpzCZXhpS0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2021/4106/0/410600a009",
"title": "Cross-platform annotation development for real-time collaborative learning",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2021/410600a009/1vK02DbfTlS",
"parentPublication": {
"id": "proceedings/icalt/2021/4106/0",
"title": "2021 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysvpmrClG",
"doi": "10.1109/ISMAR50242.2020.00081",
"title": "Collaborative Augmented Reality on Smartphones via Life-long City-scale Maps",
"normalizedTitle": "Collaborative Augmented Reality on Smartphones via Life-long City-scale Maps",
"abstract": "In this paper we present the first published end-to-end production computer-vision system for powering city-scale shared augmented reality experiences on mobile devices. In doing so we propose a new formulation for an experience-based mapping framework as an effective solution to the key issues of city-scale SLAM scalability, robustness, map updates and all-time all-weather performance required by a production system. Furthermore, we propose an effective way of synchronising SLAM systems to deliver seamless real-time localisation of multiple edge devices at the same time. All this in the presence of network latency and bandwidth limitations. The resulting system is deployed and tested at scale in San Francisco where it delivers AR experiences in a mapped area of several hundred kilometers. To foster further development of this area we offer the data set to the public, constituting the largest of this kind to date.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present the first published end-to-end production computer-vision system for powering city-scale shared augmented reality experiences on mobile devices. In doing so we propose a new formulation for an experience-based mapping framework as an effective solution to the key issues of city-scale SLAM scalability, robustness, map updates and all-time all-weather performance required by a production system. Furthermore, we propose an effective way of synchronising SLAM systems to deliver seamless real-time localisation of multiple edge devices at the same time. All this in the presence of network latency and bandwidth limitations. The resulting system is deployed and tested at scale in San Francisco where it delivers AR experiences in a mapped area of several hundred kilometers. To foster further development of this area we offer the data set to the public, constituting the largest of this kind to date.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present the first published end-to-end production computer-vision system for powering city-scale shared augmented reality experiences on mobile devices. In doing so we propose a new formulation for an experience-based mapping framework as an effective solution to the key issues of city-scale SLAM scalability, robustness, map updates and all-time all-weather performance required by a production system. Furthermore, we propose an effective way of synchronising SLAM systems to deliver seamless real-time localisation of multiple edge devices at the same time. All this in the presence of network latency and bandwidth limitations. The resulting system is deployed and tested at scale in San Francisco where it delivers AR experiences in a mapped area of several hundred kilometers. To foster further development of this area we offer the data set to the public, constituting the largest of this kind to date.",
"fno": "850800a533",
"keywords": [
"Augmented Reality",
"Global Positioning System",
"Groupware",
"Mobile Computing",
"Robot Vision",
"SLAM Robots",
"Smart Phones",
"Multiple Edge Devices",
"AR Experiences",
"Collaborative Augmented Reality",
"Life Long City Scale Maps",
"City Scale Shared Augmented Reality Experiences",
"Mobile Devices",
"Experience Based Mapping Framework",
"City Scale SLAM Scalability",
"Map Updates",
"End To End Production Computer Vision System",
"All Time All Weather Performance",
"Smartphones",
"Production Systems",
"Simultaneous Localization And Mapping",
"Scalability",
"Collaboration",
"Robustness",
"Augmented Reality",
"Smart Phones",
"Computer Vision",
"Augmented Reality",
"Structure From Motion",
"Large Scale SLAM",
"Computing Methodologies",
"Artificial Intelligence",
"Computer Vision",
"Tracking And Reconstruction",
"Computing Methodologies Mixed Augmented Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Lukas Platinsky",
"givenName": "Lukas",
"surname": "Platinsky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michal Szabados",
"givenName": "Michal",
"surname": "Szabados",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Filip Hlasek",
"givenName": "Filip",
"surname": "Hlasek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ross Hemsley",
"givenName": "Ross",
"surname": "Hemsley",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Luca Del Pero",
"givenName": "Luca Del",
"surname": "Pero",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Andrej Pancik",
"givenName": "Andrej",
"surname": "Pancik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bryan Baum",
"givenName": "Bryan",
"surname": "Baum",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hugo Grimmett",
"givenName": "Hugo",
"surname": "Grimmett",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Peter Ondruska",
"givenName": "Peter",
"surname": "Ondruska",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "533-541",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a520",
"articleId": "1pysxMcaE2Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a542",
"articleId": "1pysx0C5zck",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2010/4124/0/4124a005",
"title": "Simultaneous Localization and Mapping for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2010/4124a005/12OmNvA1hcT",
"parentPublication": {
"id": "proceedings/isuvr/2010/4124/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802069",
"title": "Decoupled mapping and localization for Augmented Reality on a mobile phone",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802069/12OmNxwWoMk",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07164332",
"title": "Instant Outdoor Localization and SLAM Initialization from 2.5D Maps",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07164332/13rRUxBa5c1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a602",
"title": "A Review of SLAM Techniques and Security in Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a602/18M7gCQ0uas",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956606",
"title": "From SLAM to CAD Maps and Back Using Generative Models",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956606/1IHp5V92abe",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991394",
"title": "Pseudo Depth Maps for RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a594",
"title": "On the Redundancy Detection in Keyframe-Based SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a594/1ezRCsrH9Be",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a020",
"title": "A mapping of visual SLAM algorithms and their applications in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a020/1oZBEmzZIWY",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413161",
"title": "Towards life-long mapping of dynamic environments using temporal persistence modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413161/1tmhNHyik7e",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a171",
"title": "COVINS: Visual-Inertial SLAM for Centralized Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a171/1yeQzCgd2GQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBqMDo2",
"title": "Computer and Electrical Engineering, International Conference on",
"acronym": "iccee",
"groupId": "1002575",
"volume": "2",
"displayVolume": "2",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNscxj6f",
"doi": "10.1109/ICCEE.2009.22",
"title": "Research on Application of Virtual Reality Technology in Teaching and Training",
"normalizedTitle": "Research on Application of Virtual Reality Technology in Teaching and Training",
"abstract": "With the development of educational technology, more and more modern science and technology begins to be applied in teaching and training. Based on introduction of the definition and characters of virtual reality (VR) technology, the VR technology was introduced into the field of teaching and training. The application circumstance of VR in teaching and training was also discussed. Using VR technology, some kind of communication vehicle operation training system was designed. The key problems for operation environment construction, modeling and system running of the teaching and training system were also provided. The research not only offer novel idea for the construction of communication equipment operation training system, but also provide reference for the application of VR technology in others fields.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the development of educational technology, more and more modern science and technology begins to be applied in teaching and training. Based on introduction of the definition and characters of virtual reality (VR) technology, the VR technology was introduced into the field of teaching and training. The application circumstance of VR in teaching and training was also discussed. Using VR technology, some kind of communication vehicle operation training system was designed. The key problems for operation environment construction, modeling and system running of the teaching and training system were also provided. The research not only offer novel idea for the construction of communication equipment operation training system, but also provide reference for the application of VR technology in others fields.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the development of educational technology, more and more modern science and technology begins to be applied in teaching and training. Based on introduction of the definition and characters of virtual reality (VR) technology, the VR technology was introduced into the field of teaching and training. The application circumstance of VR in teaching and training was also discussed. Using VR technology, some kind of communication vehicle operation training system was designed. The key problems for operation environment construction, modeling and system running of the teaching and training system were also provided. The research not only offer novel idea for the construction of communication equipment operation training system, but also provide reference for the application of VR technology in others fields.",
"fno": "3925b077",
"keywords": [
"Virtual Reality",
"Teaching And Training",
"Application",
"Operation Training"
],
"authors": [
{
"affiliation": null,
"fullName": "Shuang Li",
"givenName": "Shuang",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccee",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-12-01T00:00:00",
"pubType": "proceedings",
"pages": "77-80",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3925-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3925b072",
"articleId": "12OmNBfZSk8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3925b081",
"articleId": "12OmNy49sLl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/1999/0210/0/02100032",
"title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/2/4151b037",
"title": "Virtual Operation of Motor Based on the Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151b037/12OmNzBwGoh",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/2",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900b596",
"title": "VR Technology Applied in The Teaching of Equipment Courses",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b596/1C8GFPyCUCs",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a232",
"title": "VR technology applied to traditional dance",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a232/1CzeG2lZvEI",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a231",
"title": "Application of VR Technology in Virtual Simulation Experiment Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a231/1KpBw9u2vT2",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccgiv/2022/9250/0/925000a165",
"title": "The Design and Development of Online Training System for Automobile Inspection and Maintenance Technology Specialty Based on Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iccgiv/2022/925000a165/1LxfoiNIUWA",
"parentPublication": {
"id": "proceedings/iccgiv/2022/9250/0",
"title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcime/2019/5586/0/558600a409",
"title": "Construction of Art Training System Based on Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ijcime/2019/558600a409/1j9wzcB8eTC",
"parentPublication": {
"id": "proceedings/ijcime/2019/5586/0",
"title": "2019 International Joint Conference on Information, Media and Engineering (IJCIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2021/4854/0/485400a129",
"title": "Japanese Teaching Environment Construction Based on Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2021/485400a129/1wB6NZeBii4",
"parentPublication": {
"id": "proceedings/icitbs/2021/4854/0",
"title": "2021 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tcs/2021/2910/0/291000a542",
"title": "A Feasibility Study on the Popularization of Tai Chi with Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/tcs/2021/291000a542/1wRIjFAPSk8",
"parentPublication": {
"id": "proceedings/tcs/2021/2910/0",
"title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaie/2021/2492/0/249200a605",
"title": "Usefulness of virtual reality-based teaching to course teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icaie/2021/249200a605/1wV1FnPv1EQ",
"parentPublication": {
"id": "proceedings/icaie/2021/2492/0",
"title": "2021 2nd International Conference on Artificial Intelligence and Education (ICAIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1eY1x42",
"doi": "10.1109/VR.2018.8446312",
"title": "VR-Assisted vs Video-Assisted Teacher Training",
"normalizedTitle": "VR-Assisted vs Video-Assisted Teacher Training",
"abstract": "This paper compares teacher training in Virtual Reality (VR) to traditional approaches based on videos analysis and reflections. Our VR-assisted teacher training targets classroom management (CM) skills, using a low cost collaborative immersive VR platform. First results reveal a significant improvement using the VR approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper compares teacher training in Virtual Reality (VR) to traditional approaches based on videos analysis and reflections. Our VR-assisted teacher training targets classroom management (CM) skills, using a low cost collaborative immersive VR platform. First results reveal a significant improvement using the VR approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper compares teacher training in Virtual Reality (VR) to traditional approaches based on videos analysis and reflections. Our VR-assisted teacher training targets classroom management (CM) skills, using a low cost collaborative immersive VR platform. First results reveal a significant improvement using the VR approach.",
"fno": "08446312",
"keywords": [
"Computer Based Training",
"Teacher Training",
"Video Signal Processing",
"Virtual Reality",
"Video Assisted Teacher Training",
"Low Cost Collaborative Immersive VR Platform",
"VR Approach",
"Virtual Reality",
"Video Analysis",
"Classroom Management",
"Video Reflections",
"Seminars",
"Training",
"Virtual Reality",
"Videos",
"Collaboration",
"Graphical User Interfaces",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality"
],
"authors": [
{
"affiliation": "HCI Group, Univ. of Wurzburg, Wurzburg, Germany",
"fullName": "Jean-Luc Lugrin",
"givenName": "Jean-Luc",
"surname": "Lugrin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "HCI Group, Univ. of Wurzburg, Wurzburg, Germany",
"fullName": "Sebastian Oberdorfer",
"givenName": "Sebastian",
"surname": "Oberdorfer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg, HCI Group",
"fullName": "Marc Erich Latoschik",
"givenName": "Marc Erich",
"surname": "Latoschik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg, Chair of School Pedagogy",
"fullName": "Alice Wittmann",
"givenName": "Alice",
"surname": "Wittmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg, Chair of School Pedagogy",
"fullName": "Christian Seufert",
"givenName": "Christian",
"surname": "Seufert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg, Chair of School Pedagogy",
"fullName": "Silke Grafe",
"givenName": "Silke",
"surname": "Grafe",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "625-626",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446222",
"articleId": "13bd1gCd7Sx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446052",
"articleId": "13bd1hyoTyc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/searis/2015/6881/0/07854095",
"title": "SimVR-Trei: A framework for developing vr-enhanced training",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2015/07854095/12OmNzmclEN",
"parentPublication": {
"id": "proceedings/searis/2015/6881/0",
"title": "2015 IEEE 8th Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448286",
"title": "Teacher-Guided Educational VR: Assessment of Live and Prerecorded Teachers Guiding Virtual Field Trips",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448286/13bd1eSlyt6",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446614",
"title": "Using Industrial Robots as Haptic Devices for VR-Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446614/13bd1h03qOq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714120",
"title": "Synthesizing Personalized Construction Safety Training Scenarios for VR Training",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714120/1B0Y17ScN7G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/laclo/2018/0382/0/038200a385",
"title": "Teacher Interpretation in Programming Software Training for Classroom Application",
"doi": null,
"abstractUrl": "/proceedings-article/laclo/2018/038200a385/1cdOhCQmEKI",
"parentPublication": {
"id": "proceedings/laclo/2018/0382/0",
"title": "2018 XIII Latin American Conference on Learning Technologies (LACLO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412493",
"title": "Teacher-Student Training and Triplet Loss for Facial Expression Recognition under Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412493/1tmjrEQEgTK",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a556",
"title": "Personal Identifiability of User Tracking Data During VR Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a556/1tnXbEAaBdm",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a030",
"title": "Immersive Multimodal and Procedurally-Assisted Creation of VR Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a030/1tnXheKhk1q",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a502",
"title": "Visual Indicators for Monitoring Students in a VR class",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a502/1tnXkpvZfqg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2021/4106/0/410600a373",
"title": "A Teacher Training Proposal for Classroom Conflict Management through Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2021/410600a373/1vK079Mjao0",
"parentPublication": {
"id": "proceedings/icalt/2021/4106/0",
"title": "2021 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1h03qOq",
"doi": "10.1109/VR.2018.8446614",
"title": "Using Industrial Robots as Haptic Devices for VR-Training",
"normalizedTitle": "Using Industrial Robots as Haptic Devices for VR-Training",
"abstract": "Many VR-training application require the integration of haptics, i.e. for surgical training. However, surgical VR-training is still limited to minimal invasive surgeries. For surgeries where high forces occur, like hip replacement, no VR-training applications have been developed. One cause for this is the lack of appropriate haptic devices which can deliver high forces. Novel industrial collaborative robots can provide high forces. Although, they lack control interfaces allowing to use them as haptic devices. We present 4 approaches for using these robots as general, multipurpose haptic input and output devices. The implemented approach was integrated into a VR hip replacement training application. An initial assessment demonstrates the general feasibility of our solution.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many VR-training application require the integration of haptics, i.e. for surgical training. However, surgical VR-training is still limited to minimal invasive surgeries. For surgeries where high forces occur, like hip replacement, no VR-training applications have been developed. One cause for this is the lack of appropriate haptic devices which can deliver high forces. Novel industrial collaborative robots can provide high forces. Although, they lack control interfaces allowing to use them as haptic devices. We present 4 approaches for using these robots as general, multipurpose haptic input and output devices. The implemented approach was integrated into a VR hip replacement training application. An initial assessment demonstrates the general feasibility of our solution.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many VR-training application require the integration of haptics, i.e. for surgical training. However, surgical VR-training is still limited to minimal invasive surgeries. For surgeries where high forces occur, like hip replacement, no VR-training applications have been developed. One cause for this is the lack of appropriate haptic devices which can deliver high forces. Novel industrial collaborative robots can provide high forces. Although, they lack control interfaces allowing to use them as haptic devices. We present 4 approaches for using these robots as general, multipurpose haptic input and output devices. The implemented approach was integrated into a VR hip replacement training application. An initial assessment demonstrates the general feasibility of our solution.",
"fno": "08446614",
"keywords": [
"Haptic Interfaces",
"Industrial Robots",
"Medical Robotics",
"Prosthetics",
"Surgery",
"Virtual Reality",
"Surgical VR Training",
"VR Hip Replacement Training Application",
"Haptic Devices",
"Industrial Collaborative Robots",
"Conferences",
"Virtual Reality",
"Three Dimensional Displays",
"User Interfaces",
"Haptics",
"Industrial Robot",
"Virtual Reality",
"Surgery",
"Medicine",
"Training",
"Applied Computing X 2192 Life And Medical Sciences",
"Human Centered Computing X 2192 Virtual Reality",
"Human Centered Computing X 2192 Haptic Devices"
],
"authors": [
{
"affiliation": "University of Technology, Institute for Machine Tools and Production Processes, Chemnitz",
"fullName": "Sebastian Knopp",
"givenName": "Sebastian",
"surname": "Knopp",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Orthopedics Trauma and Plastic Surgery University Clinics of Leipzig, Institute for Machine Tools and Production Processes University of Technology, Chemnitz",
"fullName": "Mario Lorenz",
"givenName": "Mario",
"surname": "Lorenz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Technology, Institute for Machine Tools and Production Processes, Chemnitz",
"fullName": "Luigi Pelliccia",
"givenName": "Luigi",
"surname": "Pelliccia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chemnitz University of Technology Fraunhofer Institute for Machine Tools and Forming Technology IWU, Institute for Machine Tools and Production Processes University of Technology, Chemnitz",
"fullName": "Philipp Klimant",
"givenName": "Philipp",
"surname": "Klimant",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "607-608",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446275",
"articleId": "13bd1gCd7Sy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446582",
"articleId": "13bd1h03qOp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ivs/2005/8961/0/01505201",
"title": "VR haptic interfaces for teleoperation: an evaluation study",
"doi": null,
"abstractUrl": "/proceedings-article/ivs/2005/01505201/12OmNx5piQE",
"parentPublication": {
"id": "proceedings/ivs/2005/8961/0",
"title": "2005 IEEE Intelligent Vehicles Symposium Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446462",
"title": "A Virtual Hip Replacement Surgery Simulator with Realistic Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446462/13bd1f3HvEK",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446399",
"title": "Three Haptic Shape-Feedback Controllers for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446399/13bd1fHrlRF",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/04/tth2012040344",
"title": "Impulse-Based Rendering Methods for Haptic Simulation of Bone-Burring",
"doi": null,
"abstractUrl": "/journal/th/2012/04/tth2012040344/13rRUwhHcQZ",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a083",
"title": "Tapping with a Handheld Stick in VR: Redirection Detection Thresholds for Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a083/1CJcjWU39wQ",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a592",
"title": "A Haptic Stimulation-Based Training Method to Improve the Quality of Motor Imagery EEG Signal in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a592/1MNgVlvp10Q",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798287",
"title": "Integrating Tactile Feedback in an Acetabular Reamer for Surgical VR-Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798287/1cJ0U048bMQ",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797918",
"title": "Virtual Reality Training with Passive Haptic Feedback for CryoEM Sample Preparation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797918/1cJ14ZjqmCQ",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a575",
"title": "The Effect of the Virtual Object Size on Weight Perception Augmented with Pseudo-Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a575/1tnWwW9JGXC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a201",
"title": "Haptic-Enabled Buttons Through Adaptive Trigger Resistance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a201/1tnXoCxhKgw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1AqxQLuHc6Q",
"title": "2021 2nd Asia Conference on Computers and Communications (ACCC)",
"acronym": "accc",
"groupId": "1840104",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AqxR0BptlK",
"doi": "10.1109/ACCC54619.2021.00014",
"title": "Apply VR to Carry out Crew Escape Training",
"normalizedTitle": "Apply VR to Carry out Crew Escape Training",
"abstract": "The digital transformation of the shipping industry has brought significant challenges to ship-driving technology and new opportunities for crews. This report first analyzes the problems related to large ships' training and puts forward the necessity of efficient communication and computer support. The analysis and research of 3D imaging technology and computer-aided systems such as VR puts forward the evacuation model of the crew training with VR and optimizes the evacuation of the ship's members in the event of an accident, and analyzes and summarizes the advantages of VR training by examining the relevant products or systems such as Maersk training, Shenzhen Maritime Bureau and Videotel. VR supports crew safety writing needs to consider and eventually proposes a basic model for knowing related systems' development. The analysis methods mainly include literature investigation, video analysis, interview, and database analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The digital transformation of the shipping industry has brought significant challenges to ship-driving technology and new opportunities for crews. This report first analyzes the problems related to large ships' training and puts forward the necessity of efficient communication and computer support. The analysis and research of 3D imaging technology and computer-aided systems such as VR puts forward the evacuation model of the crew training with VR and optimizes the evacuation of the ship's members in the event of an accident, and analyzes and summarizes the advantages of VR training by examining the relevant products or systems such as Maersk training, Shenzhen Maritime Bureau and Videotel. VR supports crew safety writing needs to consider and eventually proposes a basic model for knowing related systems' development. The analysis methods mainly include literature investigation, video analysis, interview, and database analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The digital transformation of the shipping industry has brought significant challenges to ship-driving technology and new opportunities for crews. This report first analyzes the problems related to large ships' training and puts forward the necessity of efficient communication and computer support. The analysis and research of 3D imaging technology and computer-aided systems such as VR puts forward the evacuation model of the crew training with VR and optimizes the evacuation of the ship's members in the event of an accident, and analyzes and summarizes the advantages of VR training by examining the relevant products or systems such as Maersk training, Shenzhen Maritime Bureau and Videotel. VR supports crew safety writing needs to consider and eventually proposes a basic model for knowing related systems' development. The analysis methods mainly include literature investigation, video analysis, interview, and database analysis.",
"fno": "074300a045",
"keywords": [
"Computer Based Training",
"Emergency Services",
"Marine Safety",
"Ships",
"Virtual Reality",
"Apply VR",
"Carry",
"Crew Escape Training",
"Digital Transformation",
"Shipping Industry",
"Crews",
"Ships",
"Computer Support",
"3 D Imaging Technology",
"Computer Aided Systems",
"Evacuation Model",
"Crew Training",
"Ship",
"VR Training",
"Relevant Products",
"Maersk Training",
"Shenzhen Maritime Bureau",
"Crew Safety",
"Related Systems",
"Video Analysis",
"Database Analysis",
"Training",
"Solid Modeling",
"Three Dimensional Displays",
"Collaboration",
"Transportation",
"Virtual Reality",
"Production",
"Virtual Reality",
"Digital Transformation",
"CSCW",
"Training"
],
"authors": [
{
"affiliation": "School of Computer and Artificial Intelligence, Wuhan University of Technology,Wuhan,China",
"fullName": "Outong Li",
"givenName": "Outong",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Blockchains and Electronic Markets, University of Copenhagen,Copenhagen,Denmark",
"fullName": "Haiting Han",
"givenName": "Haiting",
"surname": "Han",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "accc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "45-50",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0743-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "074300a039",
"articleId": "1AqxUM539FS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "074300a051",
"articleId": "1AqxRx34GaY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2014/6854/0/6854a001",
"title": "A Novel Networked Marine Engine Simulator for Crew Operation Examination with Auto Evaluation Using Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a001/12OmNvkpl4K",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446312",
"title": "VR-Assisted vs Video-Assisted Teacher Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446312/13bd1eY1x42",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714120",
"title": "Synthesizing Personalized Construction Safety Training Scenarios for VR Training",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714120/1B0Y17ScN7G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a232",
"title": "VR technology applied to traditional dance",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a232/1CzeG2lZvEI",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798147",
"title": "[DC] Designing VR for Teamwork: The Influence of HMD VR Communication Capabilities on Teamwork Competencies",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798147/1cJ0HhK5ANW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798365",
"title": "EPICSAVE Lifesaving Decisions – a Collaborative VR Training Game Sketch for Paramedics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798365/1cJ1awkwF4A",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a313",
"title": "Virtual Reality Simulations for Hospital Fire Evacuation: A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a313/1oZBAeVWiOs",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieit/2021/2563/0/256300a477",
"title": "Applied Research of VR Technology in Civil Engineering Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/ieit/2021/256300a477/1wHKq2RrP20",
"parentPublication": {
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2021/1596/0/159600a921",
"title": "Design of the Fire Evacuation Training System for Underground Buildings Based on VR",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2021/159600a921/1wcduh3N8KQ",
"parentPublication": {
"id": "proceedings/aemcse/2021/1596/0",
"title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2021/4065/0/406500a159",
"title": "VR-based Training on Handling LNG Related Emergency in the Maritime Industry",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2021/406500a159/1yBF5Wqysak",
"parentPublication": {
"id": "proceedings/cw/2021/4065/0",
"title": "2021 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdIvmlVrW",
"doi": "10.1109/VRW55335.2022.00114",
"title": "Group-based VR Training to Improve Hazard Recognition, Evaluation, and Control for Highway Construction Workers",
"normalizedTitle": "Group-based VR Training to Improve Hazard Recognition, Evaluation, and Control for Highway Construction Workers",
"abstract": "The construction industry spends approximately $15billion/year for occupational injuries, and highway sector is the most dangerous. Highway construction workers have to work in close proximity to construction equipment and high-speed traffic, exposing them to an elevated risk of serious injuries/fatalities. Safety training has a direct impact on the prevention of construction accidents. The traditional lecture-based construction training curriculum has not been revisited and is designed to train the workers individually, thus the benefits of collective engagement in worker training is ignored. High-engagement Virtual Reality (VR) environments offer a more effective learning experience for training workers to identify hazards in the job site. We present a training platform for instructor-in-the-loop, group-based VR training to complement and increase the effectiveness of the current training program for highway workers. We develop a VR platform in which an instructor can create and improvise on work zone scenarios and share the virtual scenario easily with the entire class.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The construction industry spends approximately $15billion/year for occupational injuries, and highway sector is the most dangerous. Highway construction workers have to work in close proximity to construction equipment and high-speed traffic, exposing them to an elevated risk of serious injuries/fatalities. Safety training has a direct impact on the prevention of construction accidents. The traditional lecture-based construction training curriculum has not been revisited and is designed to train the workers individually, thus the benefits of collective engagement in worker training is ignored. High-engagement Virtual Reality (VR) environments offer a more effective learning experience for training workers to identify hazards in the job site. We present a training platform for instructor-in-the-loop, group-based VR training to complement and increase the effectiveness of the current training program for highway workers. We develop a VR platform in which an instructor can create and improvise on work zone scenarios and share the virtual scenario easily with the entire class.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The construction industry spends approximately $15billion/year for occupational injuries, and highway sector is the most dangerous. Highway construction workers have to work in close proximity to construction equipment and high-speed traffic, exposing them to an elevated risk of serious injuries/fatalities. Safety training has a direct impact on the prevention of construction accidents. The traditional lecture-based construction training curriculum has not been revisited and is designed to train the workers individually, thus the benefits of collective engagement in worker training is ignored. High-engagement Virtual Reality (VR) environments offer a more effective learning experience for training workers to identify hazards in the job site. We present a training platform for instructor-in-the-loop, group-based VR training to complement and increase the effectiveness of the current training program for highway workers. We develop a VR platform in which an instructor can create and improvise on work zone scenarios and share the virtual scenario easily with the entire class.",
"fno": "840200a513",
"keywords": [
"Computer Aided Instruction",
"Computer Based Training",
"Construction Equipment",
"Construction Industry",
"Hazards",
"Industrial Training",
"Occupational Health",
"Occupational Safety",
"Personnel",
"Training",
"Virtual Reality",
"Group Based VR Training",
"Improve Hazard Recognition",
"Highway Construction Workers",
"Construction Industry",
"Occupational Injuries",
"Highway Sector",
"Construction Equipment",
"High Speed Traffic",
"Safety Training",
"Construction Accidents",
"Traditional Lecture Based Construction Training Curriculum",
"Worker Training",
"High Engagement Virtual Reality Environments",
"Training Platform",
"Current Training Program",
"Highway Workers",
"VR Platform",
"Training",
"Road Transportation",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"User Interfaces",
"Hazards",
"Virtual Reality",
"Construction Training",
"Group VR"
],
"authors": [
{
"affiliation": "Myers-Lawson School of Construction, Virginia Tech,Department of Building Construction,United States",
"fullName": "Nazila Roofigari-Esfahan",
"givenName": "Nazila",
"surname": "Roofigari-Esfahan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Environmental Health and Safety,United States",
"fullName": "Curt Porterfield",
"givenName": "Curt",
"surname": "Porterfield",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Libraries, Virginia Tech,United States",
"fullName": "Todd Ogle",
"givenName": "Todd",
"surname": "Ogle",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Creativity, Art and Technology, Virginia Tech,United States",
"fullName": "Tanner Upthegrove",
"givenName": "Tanner",
"surname": "Upthegrove",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Department of Industrial and Systems Engineering,United States",
"fullName": "Myounghoon Jeon",
"givenName": "Myounghoon",
"surname": "Jeon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Department of Computer Science,United States",
"fullName": "Sang Won Lee",
"givenName": "Sang Won",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "513-516",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a508",
"articleId": "1CJdWuMNPPy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a517",
"articleId": "1CJdc0rTv0c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wism/2010/4224/2/05662347",
"title": "Research on Security Models of Highway Toll Network",
"doi": null,
"abstractUrl": "/proceedings-article/wism/2010/05662347/12OmNvIxeWP",
"parentPublication": {
"id": "proceedings/wism/2010/4224/2",
"title": "2010 International Conference on Web Information Systems and Mining (WISM 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic4e/2010/5680/0/05432578",
"title": "On the Traffic Administrative Department's Moral Hazard in the Operation Mechanism of Highway Franchise Based on Incomplete Contracts",
"doi": null,
"abstractUrl": "/proceedings-article/ic4e/2010/05432578/12OmNvjyxLd",
"parentPublication": {
"id": "proceedings/ic4e/2010/5680/0",
"title": "2010 International Conference on e-Education, e-Business, e-Management, and e-Learning, (IC4E)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/2/3357c373",
"title": "Highway Construction Management Information Systems Based on Workflow",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357c373/12OmNyqzLWl",
"parentPublication": {
"id": "icicta/2008/3357/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceert/2021/3817/0/381700a196",
"title": "Research on Highway Environmental Protection System and Carbon Footprint Based on Full Process Management Theory",
"doi": null,
"abstractUrl": "/proceedings-article/iceert/2021/381700a196/1A3jdRJP0vC",
"parentPublication": {
"id": "proceedings/iceert/2021/3817/0",
"title": "2021 International Conference on Information Control, Electrical Engineering and Rail Transit (ICEERT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceert/2021/3817/0/381700a172",
"title": "On compaction construction technology of subgrade and pavement in Highway Engineering",
"doi": null,
"abstractUrl": "/proceedings-article/iceert/2021/381700a172/1A3jgKe6DwQ",
"parentPublication": {
"id": "proceedings/iceert/2021/3817/0",
"title": "2021 International Conference on Information Control, Electrical Engineering and Rail Transit (ICEERT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714120",
"title": "Synthesizing Personalized Construction Safety Training Scenarios for VR Training",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714120/1B0Y17ScN7G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a734",
"title": "Exploring How, for Whom and in Which Contexts Extended Reality Training 'Works' in Upskilling Healthcare Workers: A Realist Review",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a734/1CJdEkUzZHW",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceitsa/2022/6401/0/640100a143",
"title": "Design and Application of Highway DC Power Supply System",
"doi": null,
"abstractUrl": "/proceedings-article/iceitsa/2022/640100a143/1L0880SgF68",
"parentPublication": {
"id": "proceedings/iceitsa/2022/6401/0",
"title": "2022 2nd International Conference on Electronic Information Technology and Smart Agriculture (ICEITSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600a415",
"title": "Cooperative Vehicle-Infrastructure System Use Case Design for Smart Highway",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600a415/1x3kfuJGBEY",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJfsrBSunS",
"doi": "10.1109/VRW55335.2022.00124",
"title": "VCoach: Enabling Personalized Boxing Training in Virtual Reality",
"normalizedTitle": "VCoach: Enabling Personalized Boxing Training in Virtual Reality",
"abstract": "We propose a training system in virtual reality, VCoach, automatically generating interactive and personalized boxing training drills for individual trainees. The training drill is generated in real-time based on the trainee's updated performance observed through wearable VR devices, including punch speed, reaction time, and punch pose. The training drill is visualized as a sequence of target points on a virtual heavy bag and the corresponding punch motion, as well as the performance feedback. Our experiments show that VCoach can generate personalized training drills to help trainees improve skills efficiently.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a training system in virtual reality, VCoach, automatically generating interactive and personalized boxing training drills for individual trainees. The training drill is generated in real-time based on the trainee's updated performance observed through wearable VR devices, including punch speed, reaction time, and punch pose. The training drill is visualized as a sequence of target points on a virtual heavy bag and the corresponding punch motion, as well as the performance feedback. Our experiments show that VCoach can generate personalized training drills to help trainees improve skills efficiently.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a training system in virtual reality, VCoach, automatically generating interactive and personalized boxing training drills for individual trainees. The training drill is generated in real-time based on the trainee's updated performance observed through wearable VR devices, including punch speed, reaction time, and punch pose. The training drill is visualized as a sequence of target points on a virtual heavy bag and the corresponding punch motion, as well as the performance feedback. Our experiments show that VCoach can generate personalized training drills to help trainees improve skills efficiently.",
"fno": "840200a546",
"keywords": [
"Computer Based Training",
"Image Motion Analysis",
"Military Computing",
"Virtual Reality",
"Individual Trainees",
"Training Drill",
"Trainee",
"Wearable VR Devices",
"Punch Speed",
"Reaction Time",
"Virtual Heavy Bag",
"Corresponding Punch Motion",
"V Coach",
"Personalized Training Drills",
"Enabling Personalized Boxing Training",
"Virtual Reality",
"Training System",
"Automatically",
"Interactive Boxing Training Drills",
"Personalized Boxing Training Drills",
"Training",
"Performance Evaluation",
"Visualization",
"Solid Modeling",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"Human Centered Computing Visualization Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Hao Chen",
"givenName": "Hao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Yujia Wang",
"givenName": "Yujia",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology, Yangtze Delta Region Academy of Beijing Institute of Technology,Jiaxing",
"fullName": "Wei Liang",
"givenName": "Wei",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "546-547",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJfsmJi5J6",
"name": "pvrw202284020-09757557s1-mm_840200a546.zip",
"size": "11.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757557s1-mm_840200a546.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a544",
"articleId": "1CJcMmE19cY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a548",
"articleId": "1CJenT7ps08",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223437",
"title": "Low cost virtual reality for medical training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223437/12OmNxWLTFV",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000b108",
"title": "Design of Simulation Training System of Self-Propelled Gun Based on Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000b108/17D45WgziTE",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a573",
"title": "An Evaluation of Virtual Reality Maintenance Training for Industrial Hydraulic Machines",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a573/1CJbIolGNbi",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a222",
"title": "Conceptual Design of Emotional and Pain Expressions of a Virtual Patient in a Virtual Reality Training for Paramedics",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a222/1CJcFRRMLV6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a459",
"title": "Synthesizing Shared Space Virtual Reality Fire Evacuation Training Drills",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a459/1J7W7tUmffO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a413",
"title": "Research on Virtual Reality Simulation Training System of Substation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a413/1ap5D4hxI2c",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797918",
"title": "Virtual Reality Training with Passive Haptic Feedback for CryoEM Sample Preparation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797918/1cJ14ZjqmCQ",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a566",
"title": "Virtual Reality Racket Sports: Virtual Drills for Exercise and Training",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a566/1pysv1cgLPa",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a044",
"title": "A Novel Tool for Immersive Authoring of Experiential Learning in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a044/1tnWQy5llCg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a633",
"title": "Immersive Authoring of Virtual Reality Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a633/1tnXNG6t1x6",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CzeDgkvuiA",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"acronym": "itei",
"groupId": "1846225",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1CzeG2lZvEI",
"doi": "10.1109/ITEI55021.2021.00060",
"title": "VR technology applied to traditional dance",
"normalizedTitle": "VR technology applied to traditional dance",
"abstract": "Virtual reality technology originated in the 20th century and is a new complex information technology with multimedia technology, sensor model, data calculation and other functions. VR technology, with its immersive virtual environment and sound effects, plays a huge role in traditional dance teaching. VR technology can realize multimedia intelligent teaching in traditional dance, and establish a THREE-DIMENSIONAL interactive model according to the needs of dance teaching, and construct a virtual teaching environment suitable for dance teaching. VR technology provides an ideal place for the training of dance teaching. The dance micro-training system based on virtual reality technology includes three modules: teaching scene model construction, virtual role construction and teaching environment model calculation. This paper mainly analyzes the interactive and model building functions of VR technology in dance teaching application.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality technology originated in the 20th century and is a new complex information technology with multimedia technology, sensor model, data calculation and other functions. VR technology, with its immersive virtual environment and sound effects, plays a huge role in traditional dance teaching. VR technology can realize multimedia intelligent teaching in traditional dance, and establish a THREE-DIMENSIONAL interactive model according to the needs of dance teaching, and construct a virtual teaching environment suitable for dance teaching. VR technology provides an ideal place for the training of dance teaching. The dance micro-training system based on virtual reality technology includes three modules: teaching scene model construction, virtual role construction and teaching environment model calculation. This paper mainly analyzes the interactive and model building functions of VR technology in dance teaching application.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality technology originated in the 20th century and is a new complex information technology with multimedia technology, sensor model, data calculation and other functions. VR technology, with its immersive virtual environment and sound effects, plays a huge role in traditional dance teaching. VR technology can realize multimedia intelligent teaching in traditional dance, and establish a THREE-DIMENSIONAL interactive model according to the needs of dance teaching, and construct a virtual teaching environment suitable for dance teaching. VR technology provides an ideal place for the training of dance teaching. The dance micro-training system based on virtual reality technology includes three modules: teaching scene model construction, virtual role construction and teaching environment model calculation. This paper mainly analyzes the interactive and model building functions of VR technology in dance teaching application.",
"fno": "805000a232",
"keywords": [
"Computer Based Training",
"Educational Courses",
"Humanities",
"Multimedia Computing",
"Solid Modelling",
"Teaching",
"Virtual Reality",
"VR Technology",
"Immersive Virtual Environment",
"Traditional Dance Teaching",
"Multimedia Intelligent Teaching",
"Virtual Teaching Environment",
"Dance Microtraining System",
"Virtual Reality",
"Virtual Role Construction",
"Teaching Environment Model Calculation",
"Complex Information Technology",
"Sensor Model",
"Data Calculation",
"Sound Effects",
"Three Dimensional Interactive Model",
"Dance Teaching Training",
"Training",
"Solid Modeling",
"Education",
"Buildings",
"Virtual Environments",
"Virtual Reality",
"Data Models",
"Vr Technology",
"Traditional",
"Dance"
],
"authors": [
{
"affiliation": "Jiangxi University of Applied Science,Nanchang,China,330100",
"fullName": "Xiaomei Zhu",
"givenName": "Xiaomei",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "itei",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "232-235",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-8050-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "805000a228",
"articleId": "1CzeOmPZtqU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "805000a236",
"articleId": "1CzeLjISRvW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/culture-and-computing/2017/1135/0/08227355",
"title": "Content Concept for VR-based Interactive Korean Traditional Dance ExperienZone (IKTDEZ)",
"doi": null,
"abstractUrl": "/proceedings-article/culture-and-computing/2017/08227355/17D45VVho3f",
"parentPublication": {
"id": "proceedings/culture-and-computing/2017/1135/0",
"title": "2017 International Conference on Culture and Computing (Culture and Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esaic/2018/8028/0/802800a343",
"title": "Research on Application of VR Technology in Art Design Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/esaic/2018/802800a343/17D45XdBRRM",
"parentPublication": {
"id": "proceedings/esaic/2018/8028/0",
"title": "2018 International Conference on Engineering Simulation and Intelligent Control (ESAIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isaiee/2021/7874/0/787400a204",
"title": "Teaching Mode of Art Design Wisdom Course Based on VR Technology",
"doi": null,
"abstractUrl": "/proceedings-article/isaiee/2021/787400a204/1BByh8CxzR6",
"parentPublication": {
"id": "proceedings/isaiee/2021/7874/0",
"title": "2021 International Symposium on Advances in Informatics, Electronics and Education (ISAIEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900b596",
"title": "VR Technology Applied in The Teaching of Equipment Courses",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b596/1C8GFPyCUCs",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900b463",
"title": "Research on the application of establishing VR technology in dance communication",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b463/1C8GGzWUhEc",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icenit/2022/6307/0/630700a093",
"title": "Research and Application of English Learning Games Based on VR technology",
"doi": null,
"abstractUrl": "/proceedings-article/icenit/2022/630700a093/1KCSKpRJIJy",
"parentPublication": {
"id": "proceedings/icenit/2022/6307/0",
"title": "2022 International Conference on Education, Network and Information Technology (ICENIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a231",
"title": "Application of VR Technology in Virtual Simulation Experiment Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a231/1KpBw9u2vT2",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2019/2627/0/262700a178",
"title": "VR Teaching Materials for Dance Practice",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2019/262700a178/1hrLEMsofOE",
"parentPublication": {
"id": "proceedings/iiai-aai/2019/2627/0",
"title": "2019 8th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieit/2021/2563/0/256300a477",
"title": "Applied Research of VR Technology in Civil Engineering Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/ieit/2021/256300a477/1wHKq2RrP20",
"parentPublication": {
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eimss/2021/2707/0/270700a022",
"title": "Research on the Application of Digital Media Technology in Sports Dance Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/eimss/2021/270700a022/1yEZQHwiT6w",
"parentPublication": {
"id": "proceedings/eimss/2021/2707/0",
"title": "2021 International Conference on Education, Information Management and Service Science (EIMSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J2XO4LxJkc",
"title": "2022 International Conference on Artificial Intelligence and Autonomous Robot Systems (AIARS)",
"acronym": "aiars",
"groupId": "9942816",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JeAf32UJ2g",
"doi": "10.1109/AIARS57204.2022.00008",
"title": "Research on Personalized Film and Television Character Modeling Algorithm Based on VR Technology",
"normalizedTitle": "Research on Personalized Film and Television Character Modeling Algorithm Based on VR Technology",
"abstract": "As an emerging technology, VR technology will have a great impact on the development of film and television industry in the future. VR technology will provide new ideas for film creation and is regarded as an innovative technology to change the traditional film creation methods. Under the new situation, VR technology will be an important node of new technology subverting the whole film and television industry. By discussing the far-reaching impact of VR technology on the whole film industry and the construction of personalized film roles based on VR technology, it will have great scientific value and practical significance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As an emerging technology, VR technology will have a great impact on the development of film and television industry in the future. VR technology will provide new ideas for film creation and is regarded as an innovative technology to change the traditional film creation methods. Under the new situation, VR technology will be an important node of new technology subverting the whole film and television industry. By discussing the far-reaching impact of VR technology on the whole film industry and the construction of personalized film roles based on VR technology, it will have great scientific value and practical significance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As an emerging technology, VR technology will have a great impact on the development of film and television industry in the future. VR technology will provide new ideas for film creation and is regarded as an innovative technology to change the traditional film creation methods. Under the new situation, VR technology will be an important node of new technology subverting the whole film and television industry. By discussing the far-reaching impact of VR technology on the whole film industry and the construction of personalized film roles based on VR technology, it will have great scientific value and practical significance.",
"fno": "545700a001",
"keywords": [
"Entertainment",
"Virtual Reality",
"Film Creation Methods",
"Film Industry",
"Personalized Film Character Modeling Algorithm",
"Personalized Film Roles",
"Personalized Television Character Modeling Algorithm",
"Television Industry",
"VR Technology",
"TV",
"Entertainment Industry",
"Autonomous Robots",
"VR Technology",
"Character Modeling",
"Algorithm Research"
],
"authors": [
{
"affiliation": "Xi’an International Studies University,School of Journalism and Communication,Xi’an,China",
"fullName": "Rui Song",
"givenName": "Rui",
"surname": "Song",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aiars",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5457-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "545700a005",
"articleId": "1J2XSTNXgic",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/esiat/2009/3682/1/3682a705",
"title": "The Study of Film and TV Cultural Creative Industry Based on Digital Technology",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682a705/12OmNvJXeBJ",
"parentPublication": {
"id": "proceedings/esiat/2009/3682/1",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466381",
"title": "The Design and Implementation of Script Authoring Assistant System of Film and Television Big Data",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466381/13Jkr9ZUXTn",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdsba/2021/4590/0/459000a180",
"title": "On the Application of Calligraphy Art in Digital Film and Television Works",
"doi": null,
"abstractUrl": "/proceedings-article/icdsba/2021/459000a180/1AH7z3QBteM",
"parentPublication": {
"id": "proceedings/icdsba/2021/4590/0",
"title": "2021 5th Annual International Conference on Data Science and Business Analytics (ICDSBA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2022/9463/0/09882501",
"title": "Design and Implementation of Intelligent Education Platform with Film and Television Features Centered on Textbooks",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2022/09882501/1GBSzy7mi64",
"parentPublication": {
"id": "proceedings/icis/2022/9463/0",
"title": "2022 IEEE/ACIS 22nd International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scout/2021/0767/0/076700a218",
"title": "Research on Film and Television IP Consuming Intention of Generation Z College Students",
"doi": null,
"abstractUrl": "/proceedings-article/scout/2021/076700a218/1IbRKeotlZe",
"parentPublication": {
"id": "proceedings/scout/2021/0767/0",
"title": "2021 Smart City Challenges & Outcomes for Urban Transformation (SCOUT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiars/2022/5457/0/545700a324",
"title": "Real Time Tracking Simulation of Multi-frame Film and Television Image Features Based on Digital Image Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/aiars/2022/545700a324/1J2XV4Ife9i",
"parentPublication": {
"id": "proceedings/aiars/2022/5457/0",
"title": "2022 International Conference on Artificial Intelligence and Autonomous Robot Systems (AIARS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcime/2019/5586/0/558600a203",
"title": "The Research about Film and Television Information Management and Integrated Service",
"doi": null,
"abstractUrl": "/proceedings-article/ijcime/2019/558600a203/1j9wxxihhVS",
"parentPublication": {
"id": "proceedings/ijcime/2019/5586/0",
"title": "2019 International Joint Conference on Information, Media and Engineering (IJCIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dfhmc/2020/1518/0/151800a005",
"title": "Network video - the new direction of film market development",
"doi": null,
"abstractUrl": "/proceedings-article/dfhmc/2020/151800a005/1tcjSgwQJwI",
"parentPublication": {
"id": "proceedings/dfhmc/2020/1518/0",
"title": "2020 16th Dahe Fortune China Forum and Chinese High-educational Management Annual Academic Conference (DFHMC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0/148500b197",
"title": "A Film and TV News Digest Generation method Based on HanLP",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2020/148500b197/1ua4AYLOWxW",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0",
"title": "2020 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2021/4486/0/448600a051",
"title": "Personalized Film and Television Recommendation System Based on Big Data Platform",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2021/448600a051/1yEZmefkI7e",
"parentPublication": {
"id": "proceedings/iccnea/2021/4486/0",
"title": "2021 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MuZAVrWsOQ",
"title": "2022 International Conference on Artificial Intelligence of Things and Crowdsensing (AIoTCs)",
"acronym": "aiotcs",
"groupId": "10102096",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1MuZBVmcpFe",
"doi": "10.1109/AIoTCs58181.2022.00103",
"title": "VR Popular Science Education and Training System on Account of 3D Engine",
"normalizedTitle": "VR Popular Science Education and Training System on Account of 3D Engine",
"abstract": "With the rapid development of society and economy, the number of people who sincerely and actively participate in popular science education is increasing every year. However, the effect of traditional education mode still cannot reach the expected standard under the condition of high investment in space, capital and human resources. Based on 3D engine technology, this paper innovates the methods of site scene, training and quality assessment of traditional education mode, removes the unfavorable factors of traditional education system and adds the favorable factors of education system to create a new VR popular science education and training system. This method creates a new system with virtual scene construction, scientific teaching and research program, efficient learning environment and one-stop teaching service. This system has the advantages of low investment and high availability of resources. The practical exploration of 3D VR popular science education is expected to provide theoretical support and practical guidance for 3D VR popular science education exploration from the construction of 3D VR popular science education design framework to specific teaching design and teaching practice research. This paper studies the relevant professional theories and knowledge of VR popular science education and training system based on 3D engine, introduces some concepts and contents of VR popular science education and training system based on 3D engine, and analyzes and studies the VR popular science education and training system. After data testing, the effect was analyzed, and the test results showed that the 3D engine-based VR popular science education and training system achieved 80.85% in image rendering.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the rapid development of society and economy, the number of people who sincerely and actively participate in popular science education is increasing every year. However, the effect of traditional education mode still cannot reach the expected standard under the condition of high investment in space, capital and human resources. Based on 3D engine technology, this paper innovates the methods of site scene, training and quality assessment of traditional education mode, removes the unfavorable factors of traditional education system and adds the favorable factors of education system to create a new VR popular science education and training system. This method creates a new system with virtual scene construction, scientific teaching and research program, efficient learning environment and one-stop teaching service. This system has the advantages of low investment and high availability of resources. The practical exploration of 3D VR popular science education is expected to provide theoretical support and practical guidance for 3D VR popular science education exploration from the construction of 3D VR popular science education design framework to specific teaching design and teaching practice research. This paper studies the relevant professional theories and knowledge of VR popular science education and training system based on 3D engine, introduces some concepts and contents of VR popular science education and training system based on 3D engine, and analyzes and studies the VR popular science education and training system. After data testing, the effect was analyzed, and the test results showed that the 3D engine-based VR popular science education and training system achieved 80.85% in image rendering.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the rapid development of society and economy, the number of people who sincerely and actively participate in popular science education is increasing every year. However, the effect of traditional education mode still cannot reach the expected standard under the condition of high investment in space, capital and human resources. Based on 3D engine technology, this paper innovates the methods of site scene, training and quality assessment of traditional education mode, removes the unfavorable factors of traditional education system and adds the favorable factors of education system to create a new VR popular science education and training system. This method creates a new system with virtual scene construction, scientific teaching and research program, efficient learning environment and one-stop teaching service. This system has the advantages of low investment and high availability of resources. The practical exploration of 3D VR popular science education is expected to provide theoretical support and practical guidance for 3D VR popular science education exploration from the construction of 3D VR popular science education design framework to specific teaching design and teaching practice research. This paper studies the relevant professional theories and knowledge of VR popular science education and training system based on 3D engine, introduces some concepts and contents of VR popular science education and training system based on 3D engine, and analyzes and studies the VR popular science education and training system. After data testing, the effect was analyzed, and the test results showed that the 3D engine-based VR popular science education and training system achieved 80.85% in image rendering.",
"fno": "341000a631",
"keywords": [
"Computer Based Training",
"Investment",
"Teaching",
"Virtual Reality",
"3 D Engine Based VR Popular Science Education And Training System",
"Data Testing",
"Image Rendering",
"Investment",
"Quality Assessment",
"Research Program",
"Scientific Teaching",
"Site Scene Methods",
"Teaching Design",
"Teaching Practice Research",
"Traditional Education Mode",
"Virtual Scene Construction",
"Training",
"Three Dimensional Displays",
"Telepresence",
"Virtual Reality",
"Rendering Computer Graphics",
"Quality Assessment",
"Engines",
"3 D Engine",
"VR System",
"Popular Science Education",
"Training System"
],
"authors": [
{
"affiliation": "DaLian Neusoft University of Information,Art and Design,Dalian,Liaoning,China,116000",
"fullName": "Liya Fu",
"givenName": "Liya",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Digital Art and Design, DaLian Neusoft University of Information,Dalian,Liaoning,China,116000",
"fullName": "Nan Gao",
"givenName": "Nan",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aiotcs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "631-635",
"year": "2022",
"issn": null,
"isbn": "979-8-3503-3410-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "341000a626",
"articleId": "1MuZJ7SVBVC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "341000a637",
"articleId": "1MuZOWWYF7G",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hpcasia/2004/2138/0/21380289",
"title": "3D VR Engine",
"doi": null,
"abstractUrl": "/proceedings-article/hpcasia/2004/21380289/12OmNBOCWgk",
"parentPublication": {
"id": "proceedings/hpcasia/2004/2138/0",
"title": "High Performance Computing and Grid in Asia Pacific Region, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/5/04723009",
"title": "Research on Robotic Popular Science System Based on LEGO Bricks",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/04723009/12OmNC8dgeV",
"parentPublication": {
"id": "proceedings/csse/2008/3336/5",
"title": "2008 International Conference on Computer Science and Software Engineering (CSSE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2009/3735/7/05360079",
"title": "The Research and Implementation of the Deep Search Engine of Popular Science",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2009/05360079/12OmNroij86",
"parentPublication": {
"id": "proceedings/fskd/2009/3735/7",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892342",
"title": "Uni-CAVE: A Unity3D plugin for non-head mounted VR display systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892342/12OmNs5rkSv",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223460",
"title": "MiddleVR a generic VR toolbox",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223460/12OmNy1SFNX",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223366",
"title": "BlenderVR: Open-source framework for interactive and immersive VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223366/12OmNy7Qfpl",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2009/3823/4/3823e897",
"title": "Design of a Platform of Popular Science Education Based on Social Computing",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2009/3823e897/12OmNyaGeFk",
"parentPublication": {
"id": "proceedings/cse/2009/3823/2",
"title": "2009 International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2017/0629/0/0629a311",
"title": "Utilizing Augmented Reality to Support Students' Learning in Popular Science Courses",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2017/0629a311/12OmNywxlPq",
"parentPublication": {
"id": "proceedings/eitt/2017/0629/0",
"title": "2017 International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a319",
"title": "Analysis of the Status Quo of Popular Science Animation Education Based on Python",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a319/1KpBJCD2M7K",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797760",
"title": "Live Coding of a VR Render Engine in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797760/1cJ0OtcoEDe",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJd1TReEYo",
"doi": "10.1109/VRW55335.2022.00266",
"title": "Redirected Walking in 360° Video: Effect of Environment Size on Detection Thresholds for Translation and Rotation Gains",
"normalizedTitle": "Redirected Walking in 360° Video: Effect of Environment Size on Detection Thresholds for Translation and Rotation Gains",
"abstract": "Using real walking to control the playback of the 360° videos is a natural and immersive way to match visual and self-motion perception. Redirected walking can enable users to walk in limited physical tracking space but experience larger scenes. Environment size may affect user perception in 360° videos. We conducted a user study about the detection thresholds (DTs) for translation and rotation gains in 360° video-based virtual environments in three scenes with different widths. Results show that environment size of the scene increases the DTs for both lower and upper translation gains but doesn't affect the DTs for rotation gains.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Using real walking to control the playback of the 360° videos is a natural and immersive way to match visual and self-motion perception. Redirected walking can enable users to walk in limited physical tracking space but experience larger scenes. Environment size may affect user perception in 360° videos. We conducted a user study about the detection thresholds (DTs) for translation and rotation gains in 360° video-based virtual environments in three scenes with different widths. Results show that environment size of the scene increases the DTs for both lower and upper translation gains but doesn't affect the DTs for rotation gains.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Using real walking to control the playback of the 360° videos is a natural and immersive way to match visual and self-motion perception. Redirected walking can enable users to walk in limited physical tracking space but experience larger scenes. Environment size may affect user perception in 360° videos. We conducted a user study about the detection thresholds (DTs) for translation and rotation gains in 360° video-based virtual environments in three scenes with different widths. Results show that environment size of the scene increases the DTs for both lower and upper translation gains but doesn't affect the DTs for rotation gains.",
"fno": "840200a830",
"keywords": [
"Gait Analysis",
"Virtual Reality",
"Visual Perception",
"Environment Size",
"Detection Thresholds",
"Rotation Gains",
"Self Motion Perception",
"Redirected Walking",
"Limited Physical Tracking Space",
"Larger Scenes",
"User Perception",
"D Ts",
"360° Video Based Virtual Environments",
"Lower Translation Gains",
"Upper Translation Gains",
"Legged Locomotion",
"Visualization",
"Three Dimensional Displays",
"Design Automation",
"Conferences",
"Virtual Environments",
"User Interfaces",
"Virtual Reality",
"Redirected Walking",
"360° Video",
"Locomotion",
"Virtual Travel",
"Systems",
"Man",
"Cybernetics",
"User Interfaces—Human Computer Interaction—Immersive Experience",
"Electronic Design Automation And Methodology",
"Design Methodology—Graphics—Virtual Reality"
],
"authors": [
{
"affiliation": "University of Science and Technology of China",
"fullName": "Yanxiang Zhang",
"givenName": "Yanxiang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Qingqin Liu",
"givenName": "Qingqin",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Yingna Wang",
"givenName": "Yingna",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "830-831",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a828",
"articleId": "1CJcMwF5tO8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a832",
"articleId": "1CJdee1FBGU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892279",
"title": "Curvature gains in redirected walking: A closer look",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446479",
"title": "Adopting the Roll Manipulation for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446062",
"title": "Biomechanical Parameters Under Curvature Gains and Bending Gains in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446062/13bd1fKQxrR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08314105",
"title": "Detection Thresholds for Rotation and Translation Gains in 360° Video-Based Telepresence Systems",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08314105/13rRUxASubD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797976",
"title": "Estimation of Detection Thresholds for Redirected Turning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797976/1cJ0Y99SR1K",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798117",
"title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794563",
"title": "Estimation of Rotation Gain Thresholds Considering FOV, Gender, and Distractors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794563/1dNHkjixhDi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a358",
"title": "Revisiting Audiovisual Rotation Gains for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a358/1tnXe22MFJm",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyr6w5YIIU",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyrgYBrmpy",
"doi": "10.1109/CVPR.2019.01042",
"title": "Viewport Proposal CNN for 360° Video Quality Assessment",
"normalizedTitle": "Viewport Proposal CNN for 360° Video Quality Assessment",
"abstract": "Recent years have witnessed the growing interest in visual quality assessment (VQA) for 360° video. Unfortunately, the existing VQA approaches do not consider the facts that: 1) Observers only see viewports of 360° video, rather than patches or whole 360° frames. 2) Within the viewport, only salient regions can be perceived by observers with high resolution. Thus, this paper proposes a viewport-based convolutional neural network (V-CNN) approach for VQA on 360° video, considering both auxiliary tasks of viewport proposal and viewport saliency prediction. Our V-CNN approach is composed of two stages, i.e., viewport proposal and VQA. In the first stage, the viewport proposal network (VP-net) is developed to yield several potential viewports, seen as the first auxiliary task. In the second stage, a viewport quality network (VQ-net) is designed to rate the VQA score for each proposed viewport, in which the saliency map of the viewport is predicted and then utilized in VQA score rating. Consequently, another auxiliary task of viewport saliency prediction can be achieved. More importantly, the main task of VQA on 360° video can be accomplished via integrating the VQA scores of all viewports. The experiments validate the effectiveness of our V-CNN approach in significantly advancing the state-of-the-art performance of VQA on 360° video. In addition, our approach achieves comparable performance in two auxiliary tasks. The code of our V-CNN approach is available at https://github.com/Archer-Tatsu/V-CNN.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent years have witnessed the growing interest in visual quality assessment (VQA) for 360° video. Unfortunately, the existing VQA approaches do not consider the facts that: 1) Observers only see viewports of 360° video, rather than patches or whole 360° frames. 2) Within the viewport, only salient regions can be perceived by observers with high resolution. Thus, this paper proposes a viewport-based convolutional neural network (V-CNN) approach for VQA on 360° video, considering both auxiliary tasks of viewport proposal and viewport saliency prediction. Our V-CNN approach is composed of two stages, i.e., viewport proposal and VQA. In the first stage, the viewport proposal network (VP-net) is developed to yield several potential viewports, seen as the first auxiliary task. In the second stage, a viewport quality network (VQ-net) is designed to rate the VQA score for each proposed viewport, in which the saliency map of the viewport is predicted and then utilized in VQA score rating. Consequently, another auxiliary task of viewport saliency prediction can be achieved. More importantly, the main task of VQA on 360° video can be accomplished via integrating the VQA scores of all viewports. The experiments validate the effectiveness of our V-CNN approach in significantly advancing the state-of-the-art performance of VQA on 360° video. In addition, our approach achieves comparable performance in two auxiliary tasks. The code of our V-CNN approach is available at https://github.com/Archer-Tatsu/V-CNN.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent years have witnessed the growing interest in visual quality assessment (VQA) for 360° video. Unfortunately, the existing VQA approaches do not consider the facts that: 1) Observers only see viewports of 360° video, rather than patches or whole 360° frames. 2) Within the viewport, only salient regions can be perceived by observers with high resolution. Thus, this paper proposes a viewport-based convolutional neural network (V-CNN) approach for VQA on 360° video, considering both auxiliary tasks of viewport proposal and viewport saliency prediction. Our V-CNN approach is composed of two stages, i.e., viewport proposal and VQA. In the first stage, the viewport proposal network (VP-net) is developed to yield several potential viewports, seen as the first auxiliary task. In the second stage, a viewport quality network (VQ-net) is designed to rate the VQA score for each proposed viewport, in which the saliency map of the viewport is predicted and then utilized in VQA score rating. Consequently, another auxiliary task of viewport saliency prediction can be achieved. More importantly, the main task of VQA on 360° video can be accomplished via integrating the VQA scores of all viewports. The experiments validate the effectiveness of our V-CNN approach in significantly advancing the state-of-the-art performance of VQA on 360° video. In addition, our approach achieves comparable performance in two auxiliary tasks. The code of our V-CNN approach is available at https://github.com/Archer-Tatsu/V-CNN.",
"fno": "329300k0169",
"keywords": [
"Convolutional Neural Nets",
"Image Motion Analysis",
"Learning Artificial Intelligence",
"Video Coding",
"VQ Net",
"Viewport Proposal Network",
"Viewport Based Convolutional Neural Network Approach",
"Visual Quality Assessment",
"Video Quality Assessment",
"Viewport Proposal CNN",
"V CNN Approach",
"Viewport Saliency Prediction",
"VQA Score Rating",
"Viewport Quality Network",
"Auxiliary Task",
"Low Level Vision"
],
"authors": [
{
"affiliation": "BUAA",
"fullName": "Chen Li",
"givenName": "Chen",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "BUAA",
"fullName": "Mai Xu",
"givenName": "Mai",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "BUAA",
"fullName": "Lai Jiang",
"givenName": "Lai",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "BUAA",
"fullName": "Shanyi Zhang",
"givenName": "Shanyi",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua Univ.",
"fullName": "Xiaoming Tao",
"givenName": "Xiaoming",
"surname": "Tao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-06-01T00:00:00",
"pubType": "proceedings",
"pages": "10169-10178",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3293-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "329300k0159",
"articleId": "1gyrsgmlM8o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "329300k0179",
"articleId": "1gys3CCQLmM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2018/4195/0/08551577",
"title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360° Video Network Multicast",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a001",
"title": "Bullet Comments for 360°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a001/1CJcgerbwNa",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859963",
"title": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360° Live Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859963/1G9EwWVBvuo",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a324",
"title": "VAS360: QoE-Driven Viewport Adaptive Streaming for 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a324/1cJ0BSNq6FW",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a296",
"title": "Content-Aware Perspective Projection Optimization for Viewport Rendering of 360° Images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a296/1cdOTlMdEYw",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2019/5606/0/560600a200",
"title": "Encoding Configurations for Tile-Based 360° Video",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2019/560600a200/1gFJebKape8",
"parentPublication": {
"id": "proceedings/ism/2019/5606/0",
"title": "2019 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2019/5604/0/560400a074",
"title": "Viewport Forecasting in 360° Virtual Reality Videos with Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2019/560400a074/1grOlOCkPuM",
"parentPublication": {
"id": "proceedings/aivr/2019/5604/0",
"title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093452",
"title": "Visual Question Answering on 360° Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093452/1jPbCyCHgkw",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/04/09212608",
"title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360° Video Quality",
"doi": null,
"abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a085",
"title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxelz8ZMs",
"doi": "10.1109/VR46266.2020.00027",
"title": "SalBiNet360: Saliency Prediction on 360° Images with Local-Global Bifurcated Deep Network",
"normalizedTitle": "SalBiNet360: Saliency Prediction on 360° Images with Local-Global Bifurcated Deep Network",
"abstract": "With the development of the virtual reality applications, predicting human visual attention on 360° images is valuable to content creators and encoding algorithms, and becomes essential to understand user behaviour. In this paper, we propose a local-global bifurcated deep network for saliency prediction on 360° images, which is named as SalBiNet360. In the global deep sub-network, multiple multi-scale contextual modules and a multilevel decoder are utilized to integrate the features from the middle and deep layers of the network. In the local deep sub-network, only one multi-scale contextual module and a single-level decoder are utilized to reduce the redundancy of local saliency maps. Finally, fused saliency maps are generated by linear combination of the global and local saliency maps. Experiments on two publicly available datasets illustrate that the proposed SalBiNet360 outperforms the tested state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the development of the virtual reality applications, predicting human visual attention on 360° images is valuable to content creators and encoding algorithms, and becomes essential to understand user behaviour. In this paper, we propose a local-global bifurcated deep network for saliency prediction on 360° images, which is named as SalBiNet360. In the global deep sub-network, multiple multi-scale contextual modules and a multilevel decoder are utilized to integrate the features from the middle and deep layers of the network. In the local deep sub-network, only one multi-scale contextual module and a single-level decoder are utilized to reduce the redundancy of local saliency maps. Finally, fused saliency maps are generated by linear combination of the global and local saliency maps. Experiments on two publicly available datasets illustrate that the proposed SalBiNet360 outperforms the tested state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the development of the virtual reality applications, predicting human visual attention on 360° images is valuable to content creators and encoding algorithms, and becomes essential to understand user behaviour. In this paper, we propose a local-global bifurcated deep network for saliency prediction on 360° images, which is named as SalBiNet360. In the global deep sub-network, multiple multi-scale contextual modules and a multilevel decoder are utilized to integrate the features from the middle and deep layers of the network. In the local deep sub-network, only one multi-scale contextual module and a single-level decoder are utilized to reduce the redundancy of local saliency maps. Finally, fused saliency maps are generated by linear combination of the global and local saliency maps. Experiments on two publicly available datasets illustrate that the proposed SalBiNet360 outperforms the tested state-of-the-art methods.",
"fno": "09089519",
"keywords": [
"Feature Extraction",
"Observers",
"Predictive Models",
"Two Dimensional Displays",
"Solid Modeling",
"Decoding",
"Visualization",
"360° Images",
"SalBiNet360",
"Virtual Reality VR"
],
"authors": [
{
"affiliation": "South China University of Technology,School of Electronic and Information Engineering,Guangzhou,China",
"fullName": "Dongwen Chen",
"givenName": "Dongwen",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "South China University of Technology,School of Electronic and Information Engineering,Guangzhou,China",
"fullName": "Chunmei Qing",
"givenName": "Chunmei",
"surname": "Qing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "South China University of Technology,School of Electronic and Information Engineering,Guangzhou,China",
"fullName": "Xiangmin Xu",
"givenName": "Xiangmin",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "South China University of Technology,School of Electronic and Information Engineering,Guangzhou,China",
"fullName": "Huansheng Zhu",
"givenName": "Huansheng",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "92-100",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089591",
"articleId": "1jIxfUJDsrK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089444",
"articleId": "1jIxcBQWkyA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034c331",
"title": "SaltiNet: Scan-Path Prediction on 360 Degree Images Using Saliency Volumes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c331/12OmNAiFIc9",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/061P1B08",
"title": "Exploiting local and global patch rarities for saliency detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/061P1B08/12OmNrY3LuC",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298938",
"title": "Deep networks for saliency detection via local estimation and global search",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298938/12OmNwKGAkM",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714046",
"title": "ScanGAN360: A Generative Model of Realistic Scanpaths for 360° Images",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714046/1B0Y1GfEIQ8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a258",
"title": "360° Surface Regression with a Hyper-Sphere Loss",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a258/1ezRDMEgU3C",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0169",
"title": "Viewport Proposal CNN for 360° Video Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/04/09212608",
"title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360° Video Quality",
"doi": null,
"abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199564",
"title": "Stage-wise Salient Object Detection in 360° Omnidirectional Image via Object-level Semantical Saliency Ranking",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199564/1ncgt74HUIM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09456008",
"title": "Visual Saliency Prediction on 360 Degree Images With CNN",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09456008/1uCgpAC9kZi",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100d743",
"title": "Simple baselines can fool 360° saliency metrics",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100d743/1yNiDufgtWg",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jPbbHBGDHq",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jPbCyCHgkw",
"doi": "10.1109/WACV45572.2020.9093452",
"title": "Visual Question Answering on 360° Images",
"normalizedTitle": "Visual Question Answering on 360° Images",
"abstract": "In this work, we introduce VQA 360°, a novel task of visual question answering on 360° images. Unlike a normal field-of-view image, a 360° image captures the entire visual content around the optical center of a camera, demanding more sophisticated spatial understanding and reasoning. To address this problem, we collect the first VQA 360° dataset, containing around 17,000 real-world image-question-answer triplets for a variety of question types. We then study two different VQA models on VQA 360°, including one conventional model that takes an equirectangular image (with intrinsic distortion) as input and one dedicated model that first projects a 360° image onto cubemaps and subsequently aggregates the information from multiple spatial resolutions. We demonstrate that the cubemap-based model with multi-level fusion and attention diffusion performs favorably against other variants and the equirectangular-based models. Nevertheless, the gap between the humans' and machines' performance reveals the need for more advanced VQA 360° algorithms. We, therefore, expect our dataset and studies to serve as the benchmark for future development in this challenging task. Dataset, code, and pre-trained models are available online.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we introduce VQA 360°, a novel task of visual question answering on 360° images. Unlike a normal field-of-view image, a 360° image captures the entire visual content around the optical center of a camera, demanding more sophisticated spatial understanding and reasoning. To address this problem, we collect the first VQA 360° dataset, containing around 17,000 real-world image-question-answer triplets for a variety of question types. We then study two different VQA models on VQA 360°, including one conventional model that takes an equirectangular image (with intrinsic distortion) as input and one dedicated model that first projects a 360° image onto cubemaps and subsequently aggregates the information from multiple spatial resolutions. We demonstrate that the cubemap-based model with multi-level fusion and attention diffusion performs favorably against other variants and the equirectangular-based models. Nevertheless, the gap between the humans' and machines' performance reveals the need for more advanced VQA 360° algorithms. We, therefore, expect our dataset and studies to serve as the benchmark for future development in this challenging task. Dataset, code, and pre-trained models are available online.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we introduce VQA 360°, a novel task of visual question answering on 360° images. Unlike a normal field-of-view image, a 360° image captures the entire visual content around the optical center of a camera, demanding more sophisticated spatial understanding and reasoning. To address this problem, we collect the first VQA 360° dataset, containing around 17,000 real-world image-question-answer triplets for a variety of question types. We then study two different VQA models on VQA 360°, including one conventional model that takes an equirectangular image (with intrinsic distortion) as input and one dedicated model that first projects a 360° image onto cubemaps and subsequently aggregates the information from multiple spatial resolutions. We demonstrate that the cubemap-based model with multi-level fusion and attention diffusion performs favorably against other variants and the equirectangular-based models. Nevertheless, the gap between the humans' and machines' performance reveals the need for more advanced VQA 360° algorithms. We, therefore, expect our dataset and studies to serve as the benchmark for future development in this challenging task. Dataset, code, and pre-trained models are available online.",
"fno": "09093452",
"keywords": [
"Computational Geometry",
"Data Visualisation",
"Image Capture",
"Image Fusion",
"Image Resolution",
"Question Answering Information Retrieval",
"Stereo Image Processing",
"Multilevel Fusion",
"Cubemaps",
"360° Image Capture",
"Spatial Resolutions",
"Equirectangular Image",
"VQA 360° Dataset",
"Visual Question Answering",
"Visualization",
"Task Analysis",
"Feature Extraction",
"Distortion",
"Cognition",
"Image Color Analysis",
"Spatial Resolution"
],
"authors": [
{
"affiliation": "University of British Columbia",
"fullName": "Shih-Han Chou",
"givenName": "Shih-Han",
"surname": "Chou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Ohio State University",
"fullName": "Wei-Lun Chao",
"givenName": "Wei-Lun",
"surname": "Chao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google",
"fullName": "Wei-Sheng Lai",
"givenName": "Wei-Sheng",
"surname": "Lai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University",
"fullName": "Min Sun",
"givenName": "Min",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at Merced",
"fullName": "Ming-Hsuan Yang",
"givenName": "Ming-Hsuan",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1596-1605",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6553-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09093597",
"articleId": "1jPbftZOuUo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09093291",
"articleId": "1jPbtj4tmc8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391c425",
"title": "VQA: Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391c425/12OmNrYlmBL",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c011",
"title": "Pano-AVQA: Grounded Audio-Visual Question Answering on 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c011/1BmLjJCm02Q",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600d752",
"title": "360MonoDepth: High-Resolution 360° Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600d752/1H1mgCrsMtG",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956354",
"title": "Channel-Spatial Mutual Attention Network for 360° Salient Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956354/1IHq6Mn0tUc",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a258",
"title": "360° Surface Regression with a Hyper-Sphere Loss",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a258/1ezRDMEgU3C",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnp/2019/2700/0/08888135",
"title": "CoRE: Non-Linear 3D Sampling for Robust 360° Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icnp/2019/08888135/1ezRKG5Cp32",
"parentPublication": {
"id": "proceedings/icnp/2019/2700/0",
"title": "2019 IEEE 27th International Conference on Network Protocols (ICNP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0169",
"title": "Viewport Proposal CNN for 360° Video Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093262",
"title": "360-Indoor: Towards Learning Real-World Objects in 360° Indoor Equirectangular Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093262/1jPbAWPyE8g",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/04/09212608",
"title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360° Video Quality",
"doi": null,
"abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09456008",
"title": "Visual Saliency Prediction on 360 Degree Images With CNN",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09456008/1uCgpAC9kZi",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQBWUxple",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00045",
"title": "Enabling Collaborative Interaction with 360° Panoramas between Large-scale Displays and Immersive Headsets",
"normalizedTitle": "Enabling Collaborative Interaction with 360° Panoramas between Large-scale Displays and Immersive Headsets",
"abstract": "Head mounted displays (HMDs) can provide users with an immersive virtual reality (VR) experience, but often are limited to viewing a single environment or data set at a time. In this paper, we describe a system of networked applications whereby co-located users in the real world can use a large-scale display wall to collaborate and share data with immersed users wearing HMDs. Our work focuses on the sharing of 360° surround-view panoramic images and contextual annotations. The large-scale display wall affords non-immersed users the ability to view a multitude of contextual information and the HMDs afford the ability for users to immerse themselves in a virtual scene. The asymmetric virtual reality collaboration between immersed and non-immersed individuals can lead to deeper understanding and the feeling of a shared experience. We will highlight a series of use cases – two digital humanities projects that capture real locations using a 360° camera, and one scientific discovery project that uses computer generated 360° surround-view panoramas. In all cases, groups can benefit from both the immersive capabilities of HMDs and the collaborative affordances of large-scale display walls, and a unified experience is created for all users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Head mounted displays (HMDs) can provide users with an immersive virtual reality (VR) experience, but often are limited to viewing a single environment or data set at a time. In this paper, we describe a system of networked applications whereby co-located users in the real world can use a large-scale display wall to collaborate and share data with immersed users wearing HMDs. Our work focuses on the sharing of 360° surround-view panoramic images and contextual annotations. The large-scale display wall affords non-immersed users the ability to view a multitude of contextual information and the HMDs afford the ability for users to immerse themselves in a virtual scene. The asymmetric virtual reality collaboration between immersed and non-immersed individuals can lead to deeper understanding and the feeling of a shared experience. We will highlight a series of use cases – two digital humanities projects that capture real locations using a 360° camera, and one scientific discovery project that uses computer generated 360° surround-view panoramas. In all cases, groups can benefit from both the immersive capabilities of HMDs and the collaborative affordances of large-scale display walls, and a unified experience is created for all users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Head mounted displays (HMDs) can provide users with an immersive virtual reality (VR) experience, but often are limited to viewing a single environment or data set at a time. In this paper, we describe a system of networked applications whereby co-located users in the real world can use a large-scale display wall to collaborate and share data with immersed users wearing HMDs. Our work focuses on the sharing of 360° surround-view panoramic images and contextual annotations. The large-scale display wall affords non-immersed users the ability to view a multitude of contextual information and the HMDs afford the ability for users to immerse themselves in a virtual scene. The asymmetric virtual reality collaboration between immersed and non-immersed individuals can lead to deeper understanding and the feeling of a shared experience. We will highlight a series of use cases – two digital humanities projects that capture real locations using a 360° camera, and one scientific discovery project that uses computer generated 360° surround-view panoramas. In all cases, groups can benefit from both the immersive capabilities of HMDs and the collaborative affordances of large-scale display walls, and a unified experience is created for all users.",
"fno": "129800a183",
"keywords": [
"Groupware",
"Helmet Mounted Displays",
"Humanities",
"Virtual Reality",
"Collaborative Interaction",
"Large Scale Displays",
"HMDs",
"Immersive Virtual Reality Experience",
"Single Environment",
"Networked Applications",
"Co Located Users",
"Immersed Users",
"Surround View Panoramic Images",
"Contextual Annotations",
"Large Scale Display Wall Affords",
"Nonimmersed Users",
"Contextual Information",
"Virtual Scene",
"Asymmetric Virtual Reality Collaboration",
"Nonimmersed Individuals",
"Shared Experience",
"Immersive Capabilities",
"Collaborative Affordances",
"Surround View Panoramas",
"Social Computing",
"Annotations",
"Stereo Image Processing",
"Collaboration",
"Virtual Environments",
"Virtual Reality",
"Immersive Experience",
"Interviews",
"Standards",
"Videos",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Human Centered Computing",
"Collaborative And Social Computing",
"Collaborative And Social Computing Systems And Tools"
],
"authors": [
{
"affiliation": "University of St. Thomas,St. Paul,MN,USA",
"fullName": "Leah Emerson",
"givenName": "Leah",
"surname": "Emerson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of St. Thomas,St. Paul,MN,USA",
"fullName": "Riley Lipinski",
"givenName": "Riley",
"surname": "Lipinski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of St. Thomas,St. Paul,MN,USA",
"fullName": "Heather Shirey",
"givenName": "Heather",
"surname": "Shirey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of St. Thomas,St. Paul,MN,USA",
"fullName": "Theresa Malloy",
"givenName": "Theresa",
"surname": "Malloy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of St. Thomas,St. Paul,MN,USA",
"fullName": "Thomas Marrinan",
"givenName": "Thomas",
"surname": "Marrinan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "183-188",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeQBG8dxCM",
"name": "pismar-adjunct202112980-09585858s1-mm_129800a183.zip",
"size": "44.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar-adjunct202112980-09585858s1-mm_129800a183.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "129800a177",
"articleId": "1yeQH9z3Ve0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a189",
"articleId": "1yfxJcLBGyk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/04/07829404",
"title": "MR360: Mixed Reality Rendering for 360° Panoramic Videos",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07829404/13rRUwhHcQW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08651483",
"title": "MegaParallax: Casual 360° Panoramas with Motion Parallax",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08651483/17WX571UbUk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699261",
"title": "Visually Induced Motion Sickness in 360° Videos: Comparing and Combining Visual Optimization Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699261/19F1U8eRyMw",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a491",
"title": "Implementation of Attention-Based Spatial Audio for 360° Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a491/1J7Wlf9IrNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2020/7374/0/737400a191",
"title": "A QoE and Visual Attention Evaluation on the Influence of Audio in 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2020/737400a191/1nMQCKTCoeY",
"parentPublication": {
"id": "proceedings/wowmom/2020/7374/0",
"title": "2020 IEEE 21st International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a071",
"title": "On Head Movements in Repeated 360° Video Quality Assessment for Standing and Seated Viewing on Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a071/1tnXBnBVgqc",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a510",
"title": "The Effect of Camera Height on The User Experience of Mid-air 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a510/1tnXMvwgvmg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a042",
"title": "Rating Duration Analysis for Subjective Quality Assessment of 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a042/1vg7TpMdSH6",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a176",
"title": "Now I’m Not Afraid: Reducing Fear of Missing Out in 360° Videos on a Head-Mounted Display using a Panoramic Thumbnail",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a176/1yeCYYdBmPC",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyr8Yts",
"title": "2015 48th Hawaii International Conference on System Sciences (HICSS)",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzICEKp",
"doi": "10.1109/HICSS.2015.46",
"title": "Understanding the Influences of Trend and Fatigue in Individuals' SNS Switching Intention",
"normalizedTitle": "Understanding the Influences of Trend and Fatigue in Individuals' SNS Switching Intention",
"abstract": "Integrating research work and latest insights from the popular press on users' SNS switching, this study considers the role of trend and fatigue in why people switch from a SNS. Specifically, we employ a PPM framework as a theoretical foundation, and enrich it with constructs derived from juxtaposing recent practitioner insights and relevant literature, i.e., Users' trend-seeking tendency, and SNS fatigue (activity overload and social monitoring concern). Through a survey of 305 SNS users, we find that factors previously derived based on PPM - dissatisfaction, alternatives attractiveness, peer influence, and switching cost -- indeed significantly influence users' switching intention. Furthermore, trend-seeking tendency, though not having a direct impact on switching intention, influences individuals' perceived alternative attractiveness. Similarly, social monitoring concern indirectly affects switching intention through alternative attractiveness, while also emanates its effect by raising user dissatisfaction. Lastly, SNS activity overload has both direct and indirect effects via alternative attractiveness and dissatisfaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Integrating research work and latest insights from the popular press on users' SNS switching, this study considers the role of trend and fatigue in why people switch from a SNS. Specifically, we employ a PPM framework as a theoretical foundation, and enrich it with constructs derived from juxtaposing recent practitioner insights and relevant literature, i.e., Users' trend-seeking tendency, and SNS fatigue (activity overload and social monitoring concern). Through a survey of 305 SNS users, we find that factors previously derived based on PPM - dissatisfaction, alternatives attractiveness, peer influence, and switching cost -- indeed significantly influence users' switching intention. Furthermore, trend-seeking tendency, though not having a direct impact on switching intention, influences individuals' perceived alternative attractiveness. Similarly, social monitoring concern indirectly affects switching intention through alternative attractiveness, while also emanates its effect by raising user dissatisfaction. Lastly, SNS activity overload has both direct and indirect effects via alternative attractiveness and dissatisfaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Integrating research work and latest insights from the popular press on users' SNS switching, this study considers the role of trend and fatigue in why people switch from a SNS. Specifically, we employ a PPM framework as a theoretical foundation, and enrich it with constructs derived from juxtaposing recent practitioner insights and relevant literature, i.e., Users' trend-seeking tendency, and SNS fatigue (activity overload and social monitoring concern). Through a survey of 305 SNS users, we find that factors previously derived based on PPM - dissatisfaction, alternatives attractiveness, peer influence, and switching cost -- indeed significantly influence users' switching intention. Furthermore, trend-seeking tendency, though not having a direct impact on switching intention, influences individuals' perceived alternative attractiveness. Similarly, social monitoring concern indirectly affects switching intention through alternative attractiveness, while also emanates its effect by raising user dissatisfaction. Lastly, SNS activity overload has both direct and indirect effects via alternative attractiveness and dissatisfaction.",
"fno": "7367a324",
"keywords": [
"Switches",
"Fatigue",
"Presses",
"Facebook",
"Monitoring",
"Market Research",
"SNS Fatigue",
"SNS Switching",
"PPM Framework",
"Trend Seeking"
],
"authors": [
{
"affiliation": null,
"fullName": "Xinlin Yao",
"givenName": "Xinlin",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chee Wei Phang",
"givenName": "Chee Wei",
"surname": "Phang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hong Ling",
"givenName": "Hong",
"surname": "Ling",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2015-01-01T00:00:00",
"pubType": "proceedings",
"pages": "324-334",
"year": "2015",
"issn": "1530-1605",
"isbn": "978-1-4799-7367-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7367a314",
"articleId": "12OmNrNh0BE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7367a335",
"articleId": "12OmNBlXs8O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2014/2504/0/2504a551",
"title": "Do I Switch? Understanding Users' Intention to Switch between Social Network Sites",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2014/2504a551/12OmNqG0SOf",
"parentPublication": {
"id": "proceedings/hicss/2014/2504/0",
"title": "2014 47th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2014/4331/0/4331a036",
"title": "Research for the Pattern Analysis of Individual Interest Using SNS Data: Focusing on Facebook",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2014/4331a036/12OmNwErpvA",
"parentPublication": {
"id": "proceedings/imis/2014/4331/0",
"title": "2014 Eighth International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670a939",
"title": "Moving On: Predicting Continuance Intention on Social Networking Sites through Alternative Products",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670a939/12OmNxGSmm2",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2016/2846/0/07752329",
"title": "An analysis of sentiments on facebook during the 2016 U.S. presidential election",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2016/07752329/12OmNzwZ6sA",
"parentPublication": {
"id": "proceedings/asonam/2016/2846/0",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466528",
"title": "The Rental Right Policy Impact on Young People Rent and Purchase Intention",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466528/13JkragWTHe",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900a398",
"title": "The application of big data technology in the assessment model of college student’s entrepreneurial intention",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900a398/1C8GdwtnG48",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcd/2022/6582/0/09900552",
"title": "A Case Study on the Continuous Usage Intention of Artificial Intelligence Speaker in Product Service System Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/bcd/2022/09900552/1H448n20few",
"parentPublication": {
"id": "proceedings/bcd/2022/6582/0",
"title": "2022 IEEE/ACIS 7th International Conference on Big Data, Cloud Computing, and Data Science (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirI",
"title": "2018 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WHONlB",
"doi": "10.1109/CW.2018.00046",
"title": "Investigation on the Correlation between Eye Movement and Reaction Time under Mental Fatigue Influence",
"normalizedTitle": "Investigation on the Correlation between Eye Movement and Reaction Time under Mental Fatigue Influence",
"abstract": "With the recent development of eye tracking technology, research in eye movement and pattern has increased due to its potential to be a non-obstructive physiological measure tool. This study attempts to understand to which extent the eye behavior is relatable with human's mental chronometry in responding to changes subjected to different levels of mental fatigue. An analysis of the eye movement metrics when interacting with multiple short performance-based tasks under different states of mental fatigue is performed. It is concluded that the eye movement has influence in the resulting reaction time and the mental fatigue state of the individual. Thus, indicating the relationship as a strong potential to predict an individual's mental fatigue state. Another finding is that the relationship between the eye movement metrics and mental chronometry becomes stronger as the subjective mental fatigue level increases.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the recent development of eye tracking technology, research in eye movement and pattern has increased due to its potential to be a non-obstructive physiological measure tool. This study attempts to understand to which extent the eye behavior is relatable with human's mental chronometry in responding to changes subjected to different levels of mental fatigue. An analysis of the eye movement metrics when interacting with multiple short performance-based tasks under different states of mental fatigue is performed. It is concluded that the eye movement has influence in the resulting reaction time and the mental fatigue state of the individual. Thus, indicating the relationship as a strong potential to predict an individual's mental fatigue state. Another finding is that the relationship between the eye movement metrics and mental chronometry becomes stronger as the subjective mental fatigue level increases.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the recent development of eye tracking technology, research in eye movement and pattern has increased due to its potential to be a non-obstructive physiological measure tool. This study attempts to understand to which extent the eye behavior is relatable with human's mental chronometry in responding to changes subjected to different levels of mental fatigue. An analysis of the eye movement metrics when interacting with multiple short performance-based tasks under different states of mental fatigue is performed. It is concluded that the eye movement has influence in the resulting reaction time and the mental fatigue state of the individual. Thus, indicating the relationship as a strong potential to predict an individual's mental fatigue state. Another finding is that the relationship between the eye movement metrics and mental chronometry becomes stronger as the subjective mental fatigue level increases.",
"fno": "731500a207",
"keywords": [
"Fatigue",
"Task Analysis",
"Visualization",
"Atmospheric Measurements",
"Particle Measurements",
"Correlation",
"Eye Movement",
"Reaction Time",
"Mental Fatigue",
"Eye Hand Coordination",
"Eye Tracker"
],
"authors": [
{
"affiliation": null,
"fullName": "Vianney Renata",
"givenName": "Vianney",
"surname": "Renata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fan Li",
"givenName": "Fan",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ching-Hung Lee",
"givenName": "Ching-Hung",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chun-Hsien Chen",
"givenName": "Chun-Hsien",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "207-213",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7315-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "731500a199",
"articleId": "17D45WwsQ8m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "731500a214",
"articleId": "17D45XreC7s",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ichi/2017/4881/0/4881a275",
"title": "Fatigue Detection Model for Older Adults Using Eye-Tracking Data Gathered While Watching Video: Evaluation Against Diverse Fatiguing Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2017/4881a275/12OmNwK7o4c",
"parentPublication": {
"id": "proceedings/ichi/2017/4881/0",
"title": "2017 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2008/3304/4/3304d670",
"title": "Study on Physiological Mental Fatigue with Nonlinear Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2008/3304d670/12OmNym2c4Y",
"parentPublication": {
"id": "proceedings/icnc/2008/3304/4",
"title": "2008 Fourth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2021/1762/0/176200a197",
"title": "A non-contact mental fatigue detection method for space medical experiment using multi-feature fusion model",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2021/176200a197/1AIMv6ARBo4",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2021/1762/0",
"title": "2021 IEEE International Conferences on Internet of Things (iThings) and IEEE Green Computing & Communications (GreenCom) and IEEE Cyber, Physical & Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a172",
"title": "Predicting Reading Performance based on Eye Movement Analysis with Hidden Markov Models",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a172/1FUUgXB4TCw",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a170",
"title": "Relationships between Oculo-Motor Mesures as Task-evoked Mental Workloads During an Manipulation Task",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a170/1cMFbe8mHx6",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/emip/2019/2243/0/224300a026",
"title": "Synchronized Analysis of Eye Movement and EEG during Program Comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/emip/2019/224300a026/1dlvMfXpd5e",
"parentPublication": {
"id": "proceedings/emip/2019/2243/0",
"title": "2019 IEEE/ACM 6th International Workshop on Eye Movements in Programming (EMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bracis/2019/4253/0/425300a407",
"title": "Residual MLP Network for Mental Fatigue Classification in Mining Workers from Brain Data",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2019/425300a407/1fHkKmysNdm",
"parentPublication": {
"id": "proceedings/bracis/2019/4253/0",
"title": "2019 8th Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a247",
"title": "EEG-Based Cross-Subject Mental Fatigue Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a247/1fHkpgR1Kfu",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a124",
"title": "Mental Fatigue of Long-Term Office Tasks in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a124/1gysnb0tidq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2021/1596/0/159600a176",
"title": "An efficient method for cross-subject EEG-based mental fatigue recognition",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2021/159600a176/1wcda2a1SV2",
"parentPublication": {
"id": "proceedings/aemcse/2021/1596/0",
"title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxlrWEUmc",
"doi": "10.1109/VRW50115.2020.00137",
"title": "Impact of AR Display Context Switching and Focal Distance Switching on Human Performance: Replication on an AR Haploscope",
"normalizedTitle": "Impact of AR Display Context Switching and Focal Distance Switching on Human Performance: Replication on an AR Haploscope",
"abstract": "In augmented reality (AR) environments, information is often distributed between real and virtual contexts, and often appears at different distances from the user. Therefore, to integrate the information, users must repeatedly switch context and refocus the eyes. Previously, Gabbard, Mehra, and Swan (2018) examined these issues, using a text-based visual search task and a monocular optical see-through AR display. In this work, the authors report a replication of this earlier experiment, using a custom-built AR haploscope. The successful replication, on a very different display, is consistent with the hypothesis that the findings are a general property of AR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In augmented reality (AR) environments, information is often distributed between real and virtual contexts, and often appears at different distances from the user. Therefore, to integrate the information, users must repeatedly switch context and refocus the eyes. Previously, Gabbard, Mehra, and Swan (2018) examined these issues, using a text-based visual search task and a monocular optical see-through AR display. In this work, the authors report a replication of this earlier experiment, using a custom-built AR haploscope. The successful replication, on a very different display, is consistent with the hypothesis that the findings are a general property of AR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In augmented reality (AR) environments, information is often distributed between real and virtual contexts, and often appears at different distances from the user. Therefore, to integrate the information, users must repeatedly switch context and refocus the eyes. Previously, Gabbard, Mehra, and Swan (2018) examined these issues, using a text-based visual search task and a monocular optical see-through AR display. In this work, the authors report a replication of this earlier experiment, using a custom-built AR haploscope. The successful replication, on a very different display, is consistent with the hypothesis that the findings are a general property of AR.",
"fno": "09090479",
"keywords": [
"Visualization",
"User Interfaces",
"Augmented Reality",
"Context Switching",
"Focal Distance Switching",
"Replication"
],
"authors": [
{
"affiliation": "Mississippi State University",
"fullName": "Mohammed Safayet Arefin",
"givenName": "Mohammed Safayet",
"surname": "Arefin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mississippi State University",
"fullName": "Nate Phillips",
"givenName": "Nate",
"surname": "Phillips",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Alexander Plopski",
"givenName": "Alexander",
"surname": "Plopski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech",
"fullName": "Joseph L. Gabbard",
"givenName": "Joseph L.",
"surname": "Gabbard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mississippi State University",
"fullName": "J. Edward Swan II",
"givenName": "J. Edward",
"surname": "Swan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "571-572",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090600",
"articleId": "1jIxwQO6LXa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090634",
"articleId": "1jIxkrgIlEY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2007/1749/0/04538832",
"title": "Evaluating Display Types for AR Selection and Annotation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538832/12OmNrIaef4",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icppw/2011/4511/0/4511a063",
"title": "AR-Based Positioning for Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/icppw/2011/4511a063/12OmNwwuE0H",
"parentPublication": {
"id": "proceedings/icppw/2011/4511/0",
"title": "2011 40th International Conference on Parallel Processing Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671786",
"title": "Interaction techniques for HMD-HHD hybrid AR systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671786/12OmNyxFKaD",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08353823",
"title": "Effects of AR Display Context Switching and Focal Distance Switching on Human Performance",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08353823/13rRUwInvBe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714039",
"title": "The Effect of Context Switching, Focal Switching Distance, Binocular and Monocular Viewing, and Transient Focal Blur on Human Performance in Optical See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714039/1B0Y24wmlm8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a876",
"title": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a876/1CJdZ8RwdnG",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a786",
"title": "An Examination on Reduction of Displayed Character Shake while Walking in Place with AR Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a786/1CJf8OTaee4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a796",
"title": "A Replication Study to Measure the Perceived Three-Dimensional Location of Virtual Objects in Optical See Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a796/1CJfrSkdYUE",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089433",
"title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a526",
"title": "Text Selection in AR-HMD Using a Smartphone as an Input Device",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a526/1tnXhwEI6RO",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQC2Aw0De",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00029",
"title": "Effects of a Distracting Background and Focal Switching Distance in an Augmented Reality System",
"normalizedTitle": "Effects of a Distracting Background and Focal Switching Distance in an Augmented Reality System",
"abstract": "Many augmented reality (AR) applications require observers to shift their gaze between AR and real-world content. To date, commercial optical see-through (OST) AR displays have presented content at either a single focal distance, or at a small number of fixed focal distances. Meanwhile, real-world stimuli can occur at a variety of focal distances. Therefore, when shifting gaze between AR and real-world content, in order to view new content in sharp focus, observers must often change their eye’s accommodative state. When performed repetitively, this can negatively affect task performance and eye fatigue. However, these effects may be under reported, because past research has not yet considered the potential additional effect of distracting real world backgrounds.An experimental method that analyzes background effects is presented, using a text-based visual search task that requires integrating information presented in both AR and the real world. An experiment is reported, which examined the effect of a distracting background versus a blank background, at focal switching distances of 0, 1.33, 2.0, and 3.33 meters. Qualitatively, a majority of the participants reported that the distracting background made the task more difficult and fatiguing. Quantitatively, increasing the focal switching distance resulted in reduced task performance and increased eye fatigue. However, changing the background, between blank and distracting, did not result in significant measured differences. Suggestions are given for further efforts to examine background effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many augmented reality (AR) applications require observers to shift their gaze between AR and real-world content. To date, commercial optical see-through (OST) AR displays have presented content at either a single focal distance, or at a small number of fixed focal distances. Meanwhile, real-world stimuli can occur at a variety of focal distances. Therefore, when shifting gaze between AR and real-world content, in order to view new content in sharp focus, observers must often change their eye’s accommodative state. When performed repetitively, this can negatively affect task performance and eye fatigue. However, these effects may be under reported, because past research has not yet considered the potential additional effect of distracting real world backgrounds.An experimental method that analyzes background effects is presented, using a text-based visual search task that requires integrating information presented in both AR and the real world. An experiment is reported, which examined the effect of a distracting background versus a blank background, at focal switching distances of 0, 1.33, 2.0, and 3.33 meters. Qualitatively, a majority of the participants reported that the distracting background made the task more difficult and fatiguing. Quantitatively, increasing the focal switching distance resulted in reduced task performance and increased eye fatigue. However, changing the background, between blank and distracting, did not result in significant measured differences. Suggestions are given for further efforts to examine background effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many augmented reality (AR) applications require observers to shift their gaze between AR and real-world content. To date, commercial optical see-through (OST) AR displays have presented content at either a single focal distance, or at a small number of fixed focal distances. Meanwhile, real-world stimuli can occur at a variety of focal distances. Therefore, when shifting gaze between AR and real-world content, in order to view new content in sharp focus, observers must often change their eye’s accommodative state. When performed repetitively, this can negatively affect task performance and eye fatigue. However, these effects may be under reported, because past research has not yet considered the potential additional effect of distracting real world backgrounds.An experimental method that analyzes background effects is presented, using a text-based visual search task that requires integrating information presented in both AR and the real world. An experiment is reported, which examined the effect of a distracting background versus a blank background, at focal switching distances of 0, 1.33, 2.0, and 3.33 meters. Qualitatively, a majority of the participants reported that the distracting background made the task more difficult and fatiguing. Quantitatively, increasing the focal switching distance resulted in reduced task performance and increased eye fatigue. However, changing the background, between blank and distracting, did not result in significant measured differences. Suggestions are given for further efforts to examine background effects.",
"fno": "129800a096",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Human Factors",
"Visual Perception",
"Real World Stimuli",
"Shifting Gaze",
"Real World Content",
"Observers",
"Eye Fatigue",
"World Backgrounds",
"Background Effects",
"Text Based Visual Search Task",
"Distracting Background",
"Blank Background",
"Focal Switching Distance",
"Reduced Task Performance",
"Augmented Reality",
"Single Focal Distance",
"Fixed Focal Distances",
"Meters",
"Visualization",
"Atmospheric Measurements",
"Optical Switches",
"Observers",
"Fatigue",
"Particle Measurements",
"Augmented Reality",
"Focal Distance Switching",
"Accommodation",
"Background"
],
"authors": [
{
"affiliation": "Mississippi State University",
"fullName": "Mohammed Safayet Arefin",
"givenName": "Mohammed Safayet",
"surname": "Arefin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mississippi State University",
"fullName": "Nate Phillips",
"givenName": "Nate",
"surname": "Phillips",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Alexander Plopski",
"givenName": "Alexander",
"surname": "Plopski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mississippi State University",
"fullName": "J. Edward Swan",
"givenName": "J. Edward",
"surname": "Swan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "96-99",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129800a092",
"articleId": "1yeQMUBBz5m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a100",
"articleId": "1yeQLc7toIM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2008/2840/0/04637360",
"title": "Perception thresholds for augmented reality navigation schemes in large distances",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2008/04637360/12OmNC1GuiP",
"parentPublication": {
"id": "proceedings/ismar/2008/2840/0",
"title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2014/6887/0/06935433",
"title": "AR Petite Theater: Augmented reality storybook for supporting children's empathy behavior",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935433/12OmNC1Y5qv",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2014/6887/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08353823",
"title": "Effects of AR Display Context Switching and Focal Distance Switching on Human Performance",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08353823/13rRUwInvBe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/02/08462792",
"title": "The Effect of Focal Distance, Age, and Brightness on Near-Field Augmented Reality Depth Matching",
"doi": null,
"abstractUrl": "/journal/tg/2020/02/08462792/13w3loWnQPK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a207",
"title": "Investigation on the Correlation between Eye Movement and Reaction Time under Mental Fatigue Influence",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a207/17D45WHONlB",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714039",
"title": "The Effect of Context Switching, Focal Switching Distance, Binocular and Monocular Viewing, and Transient Focal Blur on Human Performance in Optical See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714039/1B0Y24wmlm8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798095",
"title": "Distance Judgments to On- and Off-Ground Objects in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798095/1cJ0Yxz6rrG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a098",
"title": "Food Talks: Visual and Interaction Principles for Representing Environmental and Nutritional Food Information in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a098/1gysj4CL9YI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090479",
"title": "Impact of AR Display Context Switching and Focal Distance Switching on Human Performance: Replication on an AR Haploscope",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090479/1jIxlrWEUmc",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a739",
"title": "[DC] Psychophysical Effects of Augmented Reality Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a739/1tnWQJT7eWA",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxaw597",
"title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)",
"acronym": "icat",
"groupId": "1002979",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAJVcCS",
"doi": "10.1109/ICAT.2017.8171600",
"title": "Robust brake linings friction coefficient estimation for enhancement of ehb control",
"normalizedTitle": "Robust brake linings friction coefficient estimation for enhancement of ehb control",
"abstract": "The latest braking system architectures for Hybrid (HEV) and Full Electric Vehicles (EV) feature the adoption of the X-by-wire solutions, namely electro-hydraulic (EHB) and electro-mechanical (EMB) braking systems, aimed at providing additional flexibility to the distinctive functions of brake blending and regeneration. Regenerative brakes still need to be supported by conventional friction brakes because of failures occurrence, fully-charged battery conditions, and unexpected variations of the tire-road friction coefficient. In order to achieve a smooth coordinated action between the regenerative and the conventional friction brakes, the brake linings coefficient of friction (BLCF) needs to be monitored. The main contribution of this work lies on the estimation of the BLCF using a tire-model-less approach. In particular, two different observer designs are proposed and compared. Whereas the proposed approach does not rely on any fixed tire modelization, the state estimation is robust against variations in the road friction characteristics and tire uncertainties caused by inflating pressure variations, wear, and aging. The functionality of the developed observers is tested in IPG CarMaker® by employing an experimentally validated EV, equipped with four onboard motors and an EHB system. Braking events are simulated at different deceleration levels on both dry and wet surfaces. Finally, the compensation function against variations in the BLCF is implemented in the EHB controller to achieve constant deceleration levels. Authors envisage that the precise knowledge of the BLCF will contribute to enhance the braking performance and to actively monitor the brake pad wear under different working conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The latest braking system architectures for Hybrid (HEV) and Full Electric Vehicles (EV) feature the adoption of the X-by-wire solutions, namely electro-hydraulic (EHB) and electro-mechanical (EMB) braking systems, aimed at providing additional flexibility to the distinctive functions of brake blending and regeneration. Regenerative brakes still need to be supported by conventional friction brakes because of failures occurrence, fully-charged battery conditions, and unexpected variations of the tire-road friction coefficient. In order to achieve a smooth coordinated action between the regenerative and the conventional friction brakes, the brake linings coefficient of friction (BLCF) needs to be monitored. The main contribution of this work lies on the estimation of the BLCF using a tire-model-less approach. In particular, two different observer designs are proposed and compared. Whereas the proposed approach does not rely on any fixed tire modelization, the state estimation is robust against variations in the road friction characteristics and tire uncertainties caused by inflating pressure variations, wear, and aging. The functionality of the developed observers is tested in IPG CarMaker® by employing an experimentally validated EV, equipped with four onboard motors and an EHB system. Braking events are simulated at different deceleration levels on both dry and wet surfaces. Finally, the compensation function against variations in the BLCF is implemented in the EHB controller to achieve constant deceleration levels. Authors envisage that the precise knowledge of the BLCF will contribute to enhance the braking performance and to actively monitor the brake pad wear under different working conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The latest braking system architectures for Hybrid (HEV) and Full Electric Vehicles (EV) feature the adoption of the X-by-wire solutions, namely electro-hydraulic (EHB) and electro-mechanical (EMB) braking systems, aimed at providing additional flexibility to the distinctive functions of brake blending and regeneration. Regenerative brakes still need to be supported by conventional friction brakes because of failures occurrence, fully-charged battery conditions, and unexpected variations of the tire-road friction coefficient. In order to achieve a smooth coordinated action between the regenerative and the conventional friction brakes, the brake linings coefficient of friction (BLCF) needs to be monitored. The main contribution of this work lies on the estimation of the BLCF using a tire-model-less approach. In particular, two different observer designs are proposed and compared. Whereas the proposed approach does not rely on any fixed tire modelization, the state estimation is robust against variations in the road friction characteristics and tire uncertainties caused by inflating pressure variations, wear, and aging. The functionality of the developed observers is tested in IPG CarMaker® by employing an experimentally validated EV, equipped with four onboard motors and an EHB system. Braking events are simulated at different deceleration levels on both dry and wet surfaces. Finally, the compensation function against variations in the BLCF is implemented in the EHB controller to achieve constant deceleration levels. Authors envisage that the precise knowledge of the BLCF will contribute to enhance the braking performance and to actively monitor the brake pad wear under different working conditions.",
"fno": "08171600",
"keywords": [
"Brakes",
"Tires",
"Friction",
"Observers",
"Wheels",
"Sensors",
"Electro Hydraulic Braking System",
"Brake By Wire",
"Electric Vehicle",
"Tire Model Less Approach",
"Brake Linings Friction Estimation"
],
"authors": [
{
"affiliation": "Technische Universität Ilmenau, Automotive Engineering Department, Ilmenau, Germany",
"fullName": "Vincenzo Ricciardi",
"givenName": "Vincenzo",
"surname": "Ricciardi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coventry University, School of Mechanical, Aerospace, and Automotive Engineering, Coventry, U.K.",
"fullName": "Manuel Acosta",
"givenName": "Manuel",
"surname": "Acosta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Ilmenau, Automotive Engineering Department, Ilmenau, Germany",
"fullName": "Klaus Augsburg",
"givenName": "Klaus",
"surname": "Augsburg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coventry University, School of Mechanical, Aerospace, and Automotive Engineering, Coventry, U.K.",
"fullName": "Stratis Kanarachos",
"givenName": "Stratis",
"surname": "Kanarachos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Ilmenau, Automotive Engineering Department, Ilmenau, Germany",
"fullName": "Valentin Ivanov",
"givenName": "Valentin",
"surname": "Ivanov",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icat",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-3337-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08171599",
"articleId": "12OmNyTOsjo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08171601",
"articleId": "12OmNvJXeCa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icitbs/2015/0464/0/0464a637",
"title": "Research on Detection System of Automobile Flat Slab Brake Tester",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2015/0464a637/12OmNAlvHRU",
"parentPublication": {
"id": "proceedings/icitbs/2015/0464/0",
"title": "2015 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2014/5880/0/07051980",
"title": "Tire-road friction estimation utilizing smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051980/12OmNAlvHuw",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a675",
"title": "The Application and Control Optimization of Electronic Park Brake in the Park Assist",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a675/12OmNApLGBb",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdc/2000/6638/1/00912806",
"title": "Adaptive emergency braking control using a dynamic tire/road friction model",
"doi": null,
"abstractUrl": "/proceedings-article/cdc/2000/00912806/12OmNBVIUyP",
"parentPublication": {
"id": "proceedings/cdc/2000/6638/1",
"title": "Proceedings of the 39th IEEE Conference on Decision and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a224",
"title": "Estimation of Maximum Tire-Road Friction Based on Dynamic Model Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a224/12OmNqBbHZ1",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/2/3357c418",
"title": "Research on Road Friction Coefficient Estimation Algorithm Based on Extended Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357c418/12OmNxRWIdl",
"parentPublication": {
"id": "icicta/2008/3357/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iis/2009/3618/0/3618a344",
"title": "Research on Electro-Hydraulic Brake System for Vehicle Stability",
"doi": null,
"abstractUrl": "/proceedings-article/iis/2009/3618a344/12OmNy5hRgV",
"parentPublication": {
"id": "proceedings/iis/2009/3618/0",
"title": "2009 International Conference on Industrial and Information Systems (IIS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a056",
"title": "Maximum Tire Road Friction Estimation Based on Modified Dugoff Tire Model",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a056/12OmNyNQSMk",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2013/2549/0/06746541",
"title": "Analyzing of Dynamic Friction Model for the Tire and the Runway",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2013/06746541/12OmNzYwbW5",
"parentPublication": {
"id": "proceedings/cis/2013/2549/0",
"title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sti/2022/9045/0/10103349",
"title": "Design and Fabrication of an Automotive Electromagnetic Braking System",
"doi": null,
"abstractUrl": "/proceedings-article/sti/2022/10103349/1MBEYkJlCIo",
"parentPublication": {
"id": "proceedings/sti/2022/9045/0",
"title": "2022 4th International Conference on Sustainable Technologies for Industry 4.0 (STI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNs4S8wz",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"acronym": "iri",
"groupId": "1001046",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAlvHuw",
"doi": "10.1109/IRI.2014.7051980",
"title": "Tire-road friction estimation utilizing smartphones",
"normalizedTitle": "Tire-road friction estimation utilizing smartphones",
"abstract": "Tire-road friction is an important parameter for a number of different safety features present in modern-day vehicles, and the knowledge of this friction may also prove useful to the driver of a vehicle while it is in motion. In particular, this information may help inform a driver of dangerous low-traction situations that he or she may need to be aware of. Furthermore, since a growing number of drivers have access to Bluetooth-enabled smartphones, it is worth exploring how these devices may be leveraged in concert with vehicular CAN-bus networks to provide valuable safety information.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tire-road friction is an important parameter for a number of different safety features present in modern-day vehicles, and the knowledge of this friction may also prove useful to the driver of a vehicle while it is in motion. In particular, this information may help inform a driver of dangerous low-traction situations that he or she may need to be aware of. Furthermore, since a growing number of drivers have access to Bluetooth-enabled smartphones, it is worth exploring how these devices may be leveraged in concert with vehicular CAN-bus networks to provide valuable safety information.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tire-road friction is an important parameter for a number of different safety features present in modern-day vehicles, and the knowledge of this friction may also prove useful to the driver of a vehicle while it is in motion. In particular, this information may help inform a driver of dangerous low-traction situations that he or she may need to be aware of. Furthermore, since a growing number of drivers have access to Bluetooth-enabled smartphones, it is worth exploring how these devices may be leveraged in concert with vehicular CAN-bus networks to provide valuable safety information.",
"fno": "07051980",
"keywords": [
"Vehicles",
"Friction",
"Wheels",
"Safety",
"Roads",
"Acceleration",
"Estimation",
"Traction",
"Vehicular Safety",
"CAN Bus",
"Friction",
"Mobile Computing"
],
"authors": [
{
"affiliation": "Department of Computer Science & Engineering, University of North Texas, Dentón, Texas",
"fullName": "Michael Jaynes",
"givenName": "Michael",
"surname": "Jaynes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science & Engineering, University of North Texas, Dentón, Texas",
"fullName": "Ram Dantu",
"givenName": "Ram",
"surname": "Dantu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iri",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "855-858",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-5880-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07051979",
"articleId": "12OmNvSKO0O",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07051981",
"articleId": "12OmNBIWXAM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icat/2017/3337/0/08171600",
"title": "Robust brake linings friction coefficient estimation for enhancement of ehb control",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2017/08171600/12OmNAJVcCS",
"parentPublication": {
"id": "proceedings/icat/2017/3337/0",
"title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdc/2000/6638/1/00912806",
"title": "Adaptive emergency braking control using a dynamic tire/road friction model",
"doi": null,
"abstractUrl": "/proceedings-article/cdc/2000/00912806/12OmNBVIUyP",
"parentPublication": {
"id": "proceedings/cdc/2000/6638/1",
"title": "Proceedings of the 39th IEEE Conference on Decision and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icie/2010/4080/2/05571373",
"title": "Simulation and Analysis of Tire Lateral Self-excited Vibration Based on MSC.Marc User Subroutine",
"doi": null,
"abstractUrl": "/proceedings-article/icie/2010/05571373/12OmNClQ0sd",
"parentPublication": {
"id": "proceedings/icie/2010/4080/2",
"title": "Information Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a224",
"title": "Estimation of Maximum Tire-Road Friction Based on Dynamic Model Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a224/12OmNqBbHZ1",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a090",
"title": "Research on Maximum Road Adhesion Coefficient Estimation for Distributed Drive Electric Vehicle",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a090/12OmNsbY6KV",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/2/3357c418",
"title": "Research on Road Friction Coefficient Estimation Algorithm Based on Extended Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357c418/12OmNxRWIdl",
"parentPublication": {
"id": "icicta/2008/3357/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a056",
"title": "Maximum Tire Road Friction Estimation Based on Modified Dugoff Tire Model",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a056/12OmNyNQSMk",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wsc/2000/6579/1/65791025",
"title": "Tire model for simulations of vehicle motion on high and low friction road surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2000/65791025/12OmNylKAZW",
"parentPublication": {
"id": "proceedings/wsc/2000/6579/1",
"title": "Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2013/2549/0/06746541",
"title": "Analyzing of Dynamic Friction Model for the Tire and the Runway",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2013/06746541/12OmNzYwbW5",
"parentPublication": {
"id": "proceedings/cis/2013/2549/0",
"title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2017/1618/0/161800a222",
"title": "The Improved Algorithm for Identifying the Vehicle Road Adhesion Coefficient",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2017/161800a222/1dUn7c3a8Ks",
"parentPublication": {
"id": "proceedings/icnisc/2017/1618/0",
"title": "2017 International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCaLEnw",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"acronym": "maee",
"groupId": "1802877",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBbHZ1",
"doi": "10.1109/MAEE.2013.63",
"title": "Estimation of Maximum Tire-Road Friction Based on Dynamic Model Reconstruction",
"normalizedTitle": "Estimation of Maximum Tire-Road Friction Based on Dynamic Model Reconstruction",
"abstract": "The maximum tire road friction coefficient greatly affect the dynamic vehicle response under defined driving conditions, which makes it a crucial parameter to vehicle dynamics control and active safety systems. A maximum road friction estimation scheme was proposed in this paper, in which the instantaneous tire friction was obtained by analyzing the vehicle dynamics response, and a new transient frictionwheel slip line with one unique peak value was reshaped based on reconstruction of a dynamic tire friction model. Simulation works were completed under various driving condition, which demonstrated the validation of the proposed algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The maximum tire road friction coefficient greatly affect the dynamic vehicle response under defined driving conditions, which makes it a crucial parameter to vehicle dynamics control and active safety systems. A maximum road friction estimation scheme was proposed in this paper, in which the instantaneous tire friction was obtained by analyzing the vehicle dynamics response, and a new transient frictionwheel slip line with one unique peak value was reshaped based on reconstruction of a dynamic tire friction model. Simulation works were completed under various driving condition, which demonstrated the validation of the proposed algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The maximum tire road friction coefficient greatly affect the dynamic vehicle response under defined driving conditions, which makes it a crucial parameter to vehicle dynamics control and active safety systems. A maximum road friction estimation scheme was proposed in this paper, in which the instantaneous tire friction was obtained by analyzing the vehicle dynamics response, and a new transient frictionwheel slip line with one unique peak value was reshaped based on reconstruction of a dynamic tire friction model. Simulation works were completed under various driving condition, which demonstrated the validation of the proposed algorithm.",
"fno": "4975a224",
"keywords": [
"Friction",
"Roads",
"Vehicle Dynamics",
"Estimation",
"Tires",
"Wheels",
"Mathematical Model",
"Model Reconstruction",
"Friction Coefficient",
"Friction Estimation",
"Road Condition",
"Tire Model"
],
"authors": [
{
"affiliation": null,
"fullName": "Mingyuan Bian",
"givenName": "Mingyuan",
"surname": "Bian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Long Chen",
"givenName": "Long",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yugong Luo",
"givenName": "Yugong",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Keqiang Li",
"givenName": "Keqiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "maee",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "224-228",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-4975-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4975a219",
"articleId": "12OmNCfjeDz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4975a229",
"articleId": "12OmNykCcg6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icat/2017/3337/0/08171600",
"title": "Robust brake linings friction coefficient estimation for enhancement of ehb control",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2017/08171600/12OmNAJVcCS",
"parentPublication": {
"id": "proceedings/icat/2017/3337/0",
"title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2014/5880/0/07051980",
"title": "Tire-road friction estimation utilizing smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051980/12OmNAlvHuw",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdc/2000/6638/1/00912806",
"title": "Adaptive emergency braking control using a dynamic tire/road friction model",
"doi": null,
"abstractUrl": "/proceedings-article/cdc/2000/00912806/12OmNBVIUyP",
"parentPublication": {
"id": "proceedings/cdc/2000/6638/1",
"title": "Proceedings of the 39th IEEE Conference on Decision and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icie/2010/4080/2/05571373",
"title": "Simulation and Analysis of Tire Lateral Self-excited Vibration Based on MSC.Marc User Subroutine",
"doi": null,
"abstractUrl": "/proceedings-article/icie/2010/05571373/12OmNClQ0sd",
"parentPublication": {
"id": "proceedings/icie/2010/4080/2",
"title": "Information Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a090",
"title": "Research on Maximum Road Adhesion Coefficient Estimation for Distributed Drive Electric Vehicle",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a090/12OmNsbY6KV",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/2/3357c418",
"title": "Research on Road Friction Coefficient Estimation Algorithm Based on Extended Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357c418/12OmNxRWIdl",
"parentPublication": {
"id": "icicta/2008/3357/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a056",
"title": "Maximum Tire Road Friction Estimation Based on Modified Dugoff Tire Model",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a056/12OmNyNQSMk",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wsc/2000/6579/1/65791025",
"title": "Tire model for simulations of vehicle motion on high and low friction road surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2000/65791025/12OmNylKAZW",
"parentPublication": {
"id": "proceedings/wsc/2000/6579/1",
"title": "Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2013/2549/0/06746541",
"title": "Analyzing of Dynamic Friction Model for the Tire and the Runway",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2013/06746541/12OmNzYwbW5",
"parentPublication": {
"id": "proceedings/cis/2013/2549/0",
"title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2017/1618/0/161800a222",
"title": "The Improved Algorithm for Identifying the Vehicle Road Adhesion Coefficient",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2017/161800a222/1dUn7c3a8Ks",
"parentPublication": {
"id": "proceedings/icnisc/2017/1618/0",
"title": "2017 International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCaLEnw",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"acronym": "maee",
"groupId": "1802877",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyNQSMk",
"doi": "10.1109/MAEE.2013.24",
"title": "Maximum Tire Road Friction Estimation Based on Modified Dugoff Tire Model",
"normalizedTitle": "Maximum Tire Road Friction Estimation Based on Modified Dugoff Tire Model",
"abstract": "Estimation of the tire-road friction using the signal of on-board sensors is very important for the vehicle dynamic control systems. This paper presented a tire -- Croad friction coefficient estimation algorithm based on a modified Dugoff model. The proposed algorithm first determined the tire slip ratio and the instantaneous longitudinal friction coefficient using vehicle and wheel dynamics parameters. Then, the friction coefficient was estimated through the modified Dugoff model with subsection method and converse solution method. The effectiveness and performance of the algorithm were demonstrated through vehicle dynamics simulations on different road surface conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Estimation of the tire-road friction using the signal of on-board sensors is very important for the vehicle dynamic control systems. This paper presented a tire -- Croad friction coefficient estimation algorithm based on a modified Dugoff model. The proposed algorithm first determined the tire slip ratio and the instantaneous longitudinal friction coefficient using vehicle and wheel dynamics parameters. Then, the friction coefficient was estimated through the modified Dugoff model with subsection method and converse solution method. The effectiveness and performance of the algorithm were demonstrated through vehicle dynamics simulations on different road surface conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Estimation of the tire-road friction using the signal of on-board sensors is very important for the vehicle dynamic control systems. This paper presented a tire -- Croad friction coefficient estimation algorithm based on a modified Dugoff model. The proposed algorithm first determined the tire slip ratio and the instantaneous longitudinal friction coefficient using vehicle and wheel dynamics parameters. Then, the friction coefficient was estimated through the modified Dugoff model with subsection method and converse solution method. The effectiveness and performance of the algorithm were demonstrated through vehicle dynamics simulations on different road surface conditions.",
"fno": "4975a056",
"keywords": [
"Friction",
"Roads",
"Tires",
"Estimation",
"Mathematical Model",
"Vehicles",
"Wheels",
"Converse Solution Method",
"Friction Coefficient",
"Dugoff Tire Model",
"Subsection Method"
],
"authors": [
{
"affiliation": null,
"fullName": "Long Chen",
"givenName": "Long",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mingyuan Bian",
"givenName": "Mingyuan",
"surname": "Bian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yugong Luo",
"givenName": "Yugong",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Keqiang Li",
"givenName": "Keqiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "maee",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "56-61",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-4975-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4975a052",
"articleId": "12OmNqyDjpV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4975a062",
"articleId": "12OmNvkGW5p",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icat/2017/3337/0/08171600",
"title": "Robust brake linings friction coefficient estimation for enhancement of ehb control",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2017/08171600/12OmNAJVcCS",
"parentPublication": {
"id": "proceedings/icat/2017/3337/0",
"title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2014/5880/0/07051980",
"title": "Tire-road friction estimation utilizing smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051980/12OmNAlvHuw",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdc/2000/6638/1/00912806",
"title": "Adaptive emergency braking control using a dynamic tire/road friction model",
"doi": null,
"abstractUrl": "/proceedings-article/cdc/2000/00912806/12OmNBVIUyP",
"parentPublication": {
"id": "proceedings/cdc/2000/6638/1",
"title": "Proceedings of the 39th IEEE Conference on Decision and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icie/2010/4080/2/05571373",
"title": "Simulation and Analysis of Tire Lateral Self-excited Vibration Based on MSC.Marc User Subroutine",
"doi": null,
"abstractUrl": "/proceedings-article/icie/2010/05571373/12OmNClQ0sd",
"parentPublication": {
"id": "proceedings/icie/2010/4080/2",
"title": "Information Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a224",
"title": "Estimation of Maximum Tire-Road Friction Based on Dynamic Model Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a224/12OmNqBbHZ1",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a090",
"title": "Research on Maximum Road Adhesion Coefficient Estimation for Distributed Drive Electric Vehicle",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a090/12OmNsbY6KV",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a036",
"title": "A Method to Design Weather-Responsive Speed Limit Subject to Visible Distance in the Absence of Road Surface Friction Coefficient",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a036/12OmNwx3Qam",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/2/3357c418",
"title": "Research on Road Friction Coefficient Estimation Algorithm Based on Extended Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357c418/12OmNxRWIdl",
"parentPublication": {
"id": "icicta/2008/3357/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2017/1618/0/161800a222",
"title": "The Improved Algorithm for Identifying the Vehicle Road Adhesion Coefficient",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2017/161800a222/1dUn7c3a8Ks",
"parentPublication": {
"id": "proceedings/icnisc/2017/1618/0",
"title": "2017 International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400a045",
"title": "Estimation of road adhesion coefficient for four-wheel independent drive electric vehicle",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400a045/1tzz2482NX2",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzVXNJh",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvStcHt",
"doi": "10.1109/3DUI.2015.7131717",
"title": "Comparing the performance of natural, semi-natural, and non-natural locomotion techniques in virtual reality",
"normalizedTitle": "Comparing the performance of natural, semi-natural, and non-natural locomotion techniques in virtual reality",
"abstract": "One of the goals of much virtual reality (VR) research is to increase realism. In particular, many techniques for locomotion in VR attempt to approximate real-world walking. However, it is not yet fully understood how the design of more realistic locomotion techniques affects user task performance. We performed an experiment to compare a semi-natural locomotion technique (based on the Virtusphere device) with a traditional, non-natural technique (based on a game controller) and a fully natural technique (real walking). We found that the Virtusphere technique was significantly slower and less accurate than both of the other techniques. Based on this result and others in the literature, we speculate that locomotion techniques with moderate interaction fidelity will often have performance inferior to both high-fidelity techniques and well-designed low-fidelity techniques. We argue that our experimental results are an effect of interaction fidelity, and perform a detailed analysis of the fidelity of the three locomotion techniques to support this argument.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the goals of much virtual reality (VR) research is to increase realism. In particular, many techniques for locomotion in VR attempt to approximate real-world walking. However, it is not yet fully understood how the design of more realistic locomotion techniques affects user task performance. We performed an experiment to compare a semi-natural locomotion technique (based on the Virtusphere device) with a traditional, non-natural technique (based on a game controller) and a fully natural technique (real walking). We found that the Virtusphere technique was significantly slower and less accurate than both of the other techniques. Based on this result and others in the literature, we speculate that locomotion techniques with moderate interaction fidelity will often have performance inferior to both high-fidelity techniques and well-designed low-fidelity techniques. We argue that our experimental results are an effect of interaction fidelity, and perform a detailed analysis of the fidelity of the three locomotion techniques to support this argument.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the goals of much virtual reality (VR) research is to increase realism. In particular, many techniques for locomotion in VR attempt to approximate real-world walking. However, it is not yet fully understood how the design of more realistic locomotion techniques affects user task performance. We performed an experiment to compare a semi-natural locomotion technique (based on the Virtusphere device) with a traditional, non-natural technique (based on a game controller) and a fully natural technique (real walking). We found that the Virtusphere technique was significantly slower and less accurate than both of the other techniques. Based on this result and others in the literature, we speculate that locomotion techniques with moderate interaction fidelity will often have performance inferior to both high-fidelity techniques and well-designed low-fidelity techniques. We argue that our experimental results are an effect of interaction fidelity, and perform a detailed analysis of the fidelity of the three locomotion techniques to support this argument.",
"fno": "07131717",
"keywords": [
"Legged Locomotion",
"Force",
"Foot",
"Tracking",
"Performance Evaluation",
"Three Dimensional Displays",
"Games",
"Virtusphere",
"Interaction Fidelity",
"Effectiveness",
"Locomotion Interaction"
],
"authors": [
{
"affiliation": "Center for Human-Computer Interaction and Department of Computer Science, Virginia Tech, USA",
"fullName": "Mahdi Nabiyouni",
"givenName": "Mahdi",
"surname": "Nabiyouni",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Human-Computer Interaction and Department of Computer Science, Virginia Tech, USA",
"fullName": "Ayshwarya Saktheeswaran",
"givenName": "Ayshwarya",
"surname": "Saktheeswaran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Human-Computer Interaction and Department of Computer Science, Virginia Tech, USA",
"fullName": "Doug A. Bowman",
"givenName": "Doug A.",
"surname": "Bowman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Human-Computer Interaction and Department of Computer Science, Virginia Tech, USA",
"fullName": "Ambika Karanth",
"givenName": "Ambika",
"surname": "Karanth",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "3-10",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07131716",
"articleId": "12OmNBbsiiA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07131718",
"articleId": "12OmNBNM8RA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223423",
"title": "Tracking human locomotion by relative positional feet tracking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223423/12OmNAZOJVa",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550193",
"title": "Tapping-In-Place: Increasing the naturalness of immersive walking-in-place locomotion through novel gestural input",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550193/12OmNAnMuyq",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131718",
"title": "Design and evaluation of a visual acclimation aid for a semi-natural locomotion device",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131718/12OmNBNM8RA",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492762",
"title": "Comparing VE locomotion interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492762/12OmNx8fi8K",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223386",
"title": "Comparing the performance of natural, semi-natural, and non-natural locomotion techniques in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223386/12OmNx9nGM1",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446165",
"title": "A Threefold Approach for Precise and Efficient Locomotion in Virtual Environments with Varying Accessibility",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446165/13bd1AIBM28",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448288",
"title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404569",
"title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a696",
"title": "Seamless-walk: Novel Natural Virtual Reality Locomotion Method with a High-Resolution Tactile Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a696/1CJeXaYYtd6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a346",
"title": "Spring Stepper: A Seated VR Locomotion Controller",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a346/1oZBBswUSzK",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBQ2VPd",
"title": "Computer Animation",
"acronym": "ca",
"groupId": "1000121",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAYXWx5",
"doi": "10.1109/CA.2000.889036",
"title": "An Integrated Approach towards the Representation, Manipulation and Reuse of Pre-Recorded Motion",
"normalizedTitle": "An Integrated Approach towards the Representation, Manipulation and Reuse of Pre-Recorded Motion",
"abstract": "A current trend in computer animation research is the uses of motion capture techniques to generate libraries of basic motions. This paper presents an approach to the storage and reuse of motion capture data that integrates an adaptive filter for noise reduction with a functional representation that is highly efficient. Each motion sequence is represented as a set of non-uniform B-spline curves attached to a hierarchical model. Based on this representation a set of operations is developed that, when applied to these curves, facilitates the reuse and manipulation of pre-recorded motion. These operations include the generation of smooth transitions between motion clips, the addition of exaggerated movements to motion data, motion interpolation and motion warping.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A current trend in computer animation research is the uses of motion capture techniques to generate libraries of basic motions. This paper presents an approach to the storage and reuse of motion capture data that integrates an adaptive filter for noise reduction with a functional representation that is highly efficient. Each motion sequence is represented as a set of non-uniform B-spline curves attached to a hierarchical model. Based on this representation a set of operations is developed that, when applied to these curves, facilitates the reuse and manipulation of pre-recorded motion. These operations include the generation of smooth transitions between motion clips, the addition of exaggerated movements to motion data, motion interpolation and motion warping.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A current trend in computer animation research is the uses of motion capture techniques to generate libraries of basic motions. This paper presents an approach to the storage and reuse of motion capture data that integrates an adaptive filter for noise reduction with a functional representation that is highly efficient. Each motion sequence is represented as a set of non-uniform B-spline curves attached to a hierarchical model. Based on this representation a set of operations is developed that, when applied to these curves, facilitates the reuse and manipulation of pre-recorded motion. These operations include the generation of smooth transitions between motion clips, the addition of exaggerated movements to motion data, motion interpolation and motion warping.",
"fno": "06830056",
"keywords": [],
"authors": [
{
"affiliation": "Siemens Corporate Research",
"fullName": "Sandra Sudarsky",
"givenName": "Sandra",
"surname": "Sudarsky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Texas A&M University",
"fullName": "Donald House",
"givenName": "Donald",
"surname": "House",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ca",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-05-01T00:00:00",
"pubType": "proceedings",
"pages": "56",
"year": "2000",
"issn": null,
"isbn": "0-7695-0683-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06830049",
"articleId": "12OmNx5YvbD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06830062",
"articleId": "12OmNy7QfkD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvRU0cM",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqHqSB7",
"doi": "10.1109/ISMAR-Adjunct.2017.96",
"title": "BoostHand : Distance-free Object Manipulation System with Switchable Non-linear Mapping for Augmented Reality Classrooms",
"normalizedTitle": "BoostHand : Distance-free Object Manipulation System with Switchable Non-linear Mapping for Augmented Reality Classrooms",
"abstract": "In this paper, we propose BoostHand, a freehand, distance-free object-manipulation system that supports simple trigger gestures using Leap Motion. In AR classrooms, it is necessary to allow both lecturers and students to utilize virtual teaching materials without any spatial restrictions, while handling virtual objects easily, regardless of distance. To provide efficient and accurate methods of handling AR classroom objects, our system requires only simple intuitive freehand gestures to control the users virtual hands in an enlarged, shared control space of users. We modified the GoGo interaction technique [5] by adding simple trigger gestures, and we evaluated performance against gaze-assisted selection (GaS) capabilities. Our proposed system enables both lecturers and students to utilize virtual teaching materials easily from their remote positions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose BoostHand, a freehand, distance-free object-manipulation system that supports simple trigger gestures using Leap Motion. In AR classrooms, it is necessary to allow both lecturers and students to utilize virtual teaching materials without any spatial restrictions, while handling virtual objects easily, regardless of distance. To provide efficient and accurate methods of handling AR classroom objects, our system requires only simple intuitive freehand gestures to control the users virtual hands in an enlarged, shared control space of users. We modified the GoGo interaction technique [5] by adding simple trigger gestures, and we evaluated performance against gaze-assisted selection (GaS) capabilities. Our proposed system enables both lecturers and students to utilize virtual teaching materials easily from their remote positions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose BoostHand, a freehand, distance-free object-manipulation system that supports simple trigger gestures using Leap Motion. In AR classrooms, it is necessary to allow both lecturers and students to utilize virtual teaching materials without any spatial restrictions, while handling virtual objects easily, regardless of distance. To provide efficient and accurate methods of handling AR classroom objects, our system requires only simple intuitive freehand gestures to control the users virtual hands in an enlarged, shared control space of users. We modified the GoGo interaction technique [5] by adding simple trigger gestures, and we evaluated performance against gaze-assisted selection (GaS) capabilities. Our proposed system enables both lecturers and students to utilize virtual teaching materials easily from their remote positions.",
"fno": "6327a321",
"keywords": [
"Three Dimensional Displays",
"Aerospace Electronics",
"Avatars",
"Switches",
"Education",
"Electronic Mail"
],
"authors": [
{
"affiliation": null,
"fullName": "Whie Jung",
"givenName": "Whie",
"surname": "Jung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Woojin Cho",
"givenName": "Woojin",
"surname": "Cho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hayun Kim",
"givenName": "Hayun",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Woontack Woo",
"givenName": "Woontack",
"surname": "Woo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "321-325",
"year": "2017",
"issn": null,
"isbn": "978-0-7695-6327-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6327a315",
"articleId": "12OmNylKAYY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6327a326",
"articleId": "12OmNzlD9rq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2017/3091/0/3091a010",
"title": "Duplication Based Distance-Free Freehand Virtual Object Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a010/12OmNApcu9E",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2017/5507/0/07960025",
"title": "A flexible finger-mounted airbrush model for immersive freehand painting",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2017/07960025/12OmNBV9Ikp",
"parentPublication": {
"id": "proceedings/icis/2017/5507/0",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504707",
"title": "Depth-based 3D gesture multi-level radial menu for virtual object manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504707/12OmNx3HI96",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iotdi/2018/6312/0/631201a060",
"title": "MARBLE: Mobile Augmented Reality Using a Distributed BLE Beacon Infrastructure",
"doi": null,
"abstractUrl": "/proceedings-article/iotdi/2018/631201a060/12OmNzGlRAW",
"parentPublication": {
"id": "proceedings/iotdi/2018/6312/0",
"title": "2018 IEEE/ACM Third International Conference on Internet-of-Things Design and Implementation (IoTDI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446381",
"title": "Interacting with Distant Objects in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446381/13bd1fdV4lT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798340",
"title": "Augmented Reality Map Navigation with Freehand Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798340/1cJ1fg0gjAY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2019/4528/0/09069366",
"title": "Grey Island: Immersive tangible interaction through augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2019/09069366/1jdCYiSUwBG",
"parentPublication": {
"id": "proceedings/dsc/2019/4528/0",
"title": "2019 IEEE Fourth International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a144",
"title": "Distance Estimation with Mobile Augmented Reality in Action Space: Effects of Animated Cues",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a144/1tnWHFR464w",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a788",
"title": "Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a788/1tuAHZj29Q4",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAkEU4f",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvIfDQQ",
"doi": "10.1109/ICME.2011.6012069",
"title": "Trajectory based video object manipulation",
"normalizedTitle": "Trajectory based video object manipulation",
"abstract": "We propose an object centric representation for easy and intuitive navigation and manipulation of videos. Object centric representation allows a user to directly access and process objects as basic video components. We demonstrate a trajectory based interface and example operations, which allow users to retime, reorder, remove or clone video objects in a 'click and drag' fashion. This interface is created by extracting object motion information from the video. We use object detection and tracking to obtain spatiotemporal video object tube. The corresponding object motion trajectories are represented in a 3D (x, y, t) grid. Users can navigate and manipulate video objects by scrubbing or manipulating corresponding trajectories. We show some example applications of proposed interface like object synchronization, saliency magnification, visual effects and composite video creation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an object centric representation for easy and intuitive navigation and manipulation of videos. Object centric representation allows a user to directly access and process objects as basic video components. We demonstrate a trajectory based interface and example operations, which allow users to retime, reorder, remove or clone video objects in a 'click and drag' fashion. This interface is created by extracting object motion information from the video. We use object detection and tracking to obtain spatiotemporal video object tube. The corresponding object motion trajectories are represented in a 3D (x, y, t) grid. Users can navigate and manipulate video objects by scrubbing or manipulating corresponding trajectories. We show some example applications of proposed interface like object synchronization, saliency magnification, visual effects and composite video creation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an object centric representation for easy and intuitive navigation and manipulation of videos. Object centric representation allows a user to directly access and process objects as basic video components. We demonstrate a trajectory based interface and example operations, which allow users to retime, reorder, remove or clone video objects in a 'click and drag' fashion. This interface is created by extracting object motion information from the video. We use object detection and tracking to obtain spatiotemporal video object tube. The corresponding object motion trajectories are represented in a 3D (x, y, t) grid. Users can navigate and manipulate video objects by scrubbing or manipulating corresponding trajectories. We show some example applications of proposed interface like object synchronization, saliency magnification, visual effects and composite video creation.",
"fno": "06012069",
"keywords": [
"Image Motion Analysis",
"Image Representation",
"Object Detection",
"Object Tracking",
"Video Retrieval",
"Video Signal Processing",
"Trajectory Based Video Object Manipulation",
"Object Centric Representation",
"Video Components",
"Trajectory Based Interface",
"Click And Drag Fashion",
"Object Motion Information Extraction",
"Object Motion Trajectories",
"Spatiotemporal Video Object Tube",
"Object Synchronization",
"Saliency Magnification",
"Visual Effects",
"Composite Video Creation",
"Object Detection",
"Object Tracking",
"Navigation",
"Trajectory",
"Tracking",
"Three Dimensional Displays",
"Electron Tubes",
"Object Detection",
"Spatiotemporal Phenomena",
"Motion Based Video Representation",
"Interactive Video Composition",
"Object Based Video Access"
],
"authors": [
{
"affiliation": "CVIT, IIIT Hyderabad, India",
"fullName": "Rajvi Shah",
"givenName": "Rajvi",
"surname": "Shah",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CVIT, IIIT Hyderabad, India",
"fullName": "P J Narayanan",
"givenName": "P J",
"surname": "Narayanan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2011",
"issn": "1945-7871",
"isbn": "978-1-61284-348-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06012068",
"articleId": "12OmNzZmZn5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06012070",
"articleId": "12OmNqBbI0B",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032e453",
"title": "Spatial-Aware Object Embeddings for Zero-Shot Localization and Classification of Actions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e453/12OmNrYlmCR",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b680",
"title": "Super-Trajectory for Video Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b680/12OmNxWLTjK",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d173",
"title": "Unsupervised Object Discovery and Tracking in Video Collections",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d173/12OmNyTwRh4",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169855",
"title": "Coherent event-based surveillance video synopsis using trajectory clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169855/12OmNyYm2F4",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2016/09/07330012",
"title": "Distance Threshold Similarity Searches: Efficient Trajectory Indexing on the GPU",
"doi": null,
"abstractUrl": "/journal/td/2016/09/07330012/13rRUB7a1fx",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08325298",
"title": "Semi-Supervised Video Object Segmentation with Super-Trajectories",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08325298/13rRUEgs2Ng",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/07/ttg2013071218",
"title": "Timeline Editing of Objects in Video",
"doi": null,
"abstractUrl": "/journal/tg/2013/07/ttg2013071218/13rRUx0xPi7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/01/07837719",
"title": "Saliency-Aware Video Object Segmentation",
"doi": null,
"abstractUrl": "/journal/tp/2018/01/07837719/13rRUxCitzQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600d138",
"title": "Object-Region Video Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600d138/1H0L5iRzuQU",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d073",
"title": "GATSBI: Generative Agent-centric Spatio-temporal Object Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d073/1yeIkmvPMM8",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzXFoGR",
"doi": "10.1109/CVPR.2017.652",
"title": "Memory-Augmented Attribute Manipulation Networks for Interactive Fashion Search",
"normalizedTitle": "Memory-Augmented Attribute Manipulation Networks for Interactive Fashion Search",
"abstract": "We introduce a new fashion search protocol where attribute manipulation is allowed within the interaction between users and search engines, e.g. manipulating the color attribute of the clothing from red to blue. It is particularly useful for image-based search when the query image cannot perfectly match users expectation of the desired product. To build such a search engine, we propose a novel memory-augmented Attribute Manipulation Network (AMNet) which can manipulate image representation at the attribute level. Given a query image and some attributes that need to modify, AMNet can manipulate the intermediate representation encoding the unwanted attributes and change them to the desired ones through following four novel components: (1) a dual-path CNN architecture for discriminative deep attribute representation learning, (2) a memory block with an internal memory and a neural controller for prototype attribute representation learning and hosting, (3) an attribute manipulation network to modify the representation of the query image with the prototype feature retrieved from the memory block, (4) a loss layer which jointly optimizes the attribute classification loss and a triplet ranking loss over triplet images for facilitating precise attribute manipulation and image retrieving. Extensive experiments conducted on two large-scale fashion search datasets, i.e. DARN and DeepFashion, have demonstrated that AMNet is able to achieve remarkably good performance compared with well-designed baselines in terms of effectiveness of attribute manipulation and search accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a new fashion search protocol where attribute manipulation is allowed within the interaction between users and search engines, e.g. manipulating the color attribute of the clothing from red to blue. It is particularly useful for image-based search when the query image cannot perfectly match users expectation of the desired product. To build such a search engine, we propose a novel memory-augmented Attribute Manipulation Network (AMNet) which can manipulate image representation at the attribute level. Given a query image and some attributes that need to modify, AMNet can manipulate the intermediate representation encoding the unwanted attributes and change them to the desired ones through following four novel components: (1) a dual-path CNN architecture for discriminative deep attribute representation learning, (2) a memory block with an internal memory and a neural controller for prototype attribute representation learning and hosting, (3) an attribute manipulation network to modify the representation of the query image with the prototype feature retrieved from the memory block, (4) a loss layer which jointly optimizes the attribute classification loss and a triplet ranking loss over triplet images for facilitating precise attribute manipulation and image retrieving. Extensive experiments conducted on two large-scale fashion search datasets, i.e. DARN and DeepFashion, have demonstrated that AMNet is able to achieve remarkably good performance compared with well-designed baselines in terms of effectiveness of attribute manipulation and search accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a new fashion search protocol where attribute manipulation is allowed within the interaction between users and search engines, e.g. manipulating the color attribute of the clothing from red to blue. It is particularly useful for image-based search when the query image cannot perfectly match users expectation of the desired product. To build such a search engine, we propose a novel memory-augmented Attribute Manipulation Network (AMNet) which can manipulate image representation at the attribute level. Given a query image and some attributes that need to modify, AMNet can manipulate the intermediate representation encoding the unwanted attributes and change them to the desired ones through following four novel components: (1) a dual-path CNN architecture for discriminative deep attribute representation learning, (2) a memory block with an internal memory and a neural controller for prototype attribute representation learning and hosting, (3) an attribute manipulation network to modify the representation of the query image with the prototype feature retrieved from the memory block, (4) a loss layer which jointly optimizes the attribute classification loss and a triplet ranking loss over triplet images for facilitating precise attribute manipulation and image retrieving. Extensive experiments conducted on two large-scale fashion search datasets, i.e. DARN and DeepFashion, have demonstrated that AMNet is able to achieve remarkably good performance compared with well-designed baselines in terms of effectiveness of attribute manipulation and search accuracy.",
"fno": "0457g156",
"keywords": [
"Image Classification",
"Image Representation",
"Image Retrieval",
"Interactive Systems",
"Learning Artificial Intelligence",
"Neural Nets",
"Optimisation",
"Search Engines",
"Color Attribute",
"Search Engine",
"AM Net",
"Discriminative Deep Attribute Representation Learning",
"Memory Block",
"Internal Memory",
"Prototype Attribute Representation Learning",
"Hosting",
"Triplet Images",
"Memory Augmented Attribute Manipulation Networks",
"Interactive Fashion Search",
"Fashion Search Protocol",
"Image Retrieval",
"Image Based Search",
"Intermediate Representation Manipulation",
"Attribute Encoding",
"Dual Path CNN Architecture",
"Neural Controller",
"Query Image Representation",
"Triplet Ranking Loss",
"Attribute Classification Loss Optimization",
"Image Color Analysis",
"Clothing",
"Search Engines",
"Prototypes",
"Visualization",
"Image Representation",
"Computer Architecture"
],
"authors": [
{
"affiliation": null,
"fullName": "Bo Zhao",
"givenName": "Bo",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiashi Feng",
"givenName": "Jiashi",
"surname": "Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiao Wu",
"givenName": "Xiao",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuicheng Yan",
"givenName": "Shuicheng",
"surname": "Yan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "6156-6164",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457g146",
"articleId": "12OmNB9t6pc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457g165",
"articleId": "12OmNvIfDOg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457b225",
"title": "Learning Residual Images for Face Attribute Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b225/12OmNx8fihM",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b671",
"title": "Efficient Multi-attribute Similarity Learning Towards Attribute-Based Fashion Search",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b671/12OmNyRg4fV",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b472",
"title": "Automatic Spatially-Aware Fashion Concept Discovery",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b472/12OmNzcPAFH",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h708",
"title": "Learning Attribute Representations with Localization for Flexible Fashion Search",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h708/17D45WXIkDT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d142",
"title": "Tailor Me: An Editing Network for Fashion Attribute Shape Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d142/1B12Lbh5SRa",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2127",
"title": "Learning Attribute-driven Disentangled Representations for Interactive Fashion Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2127/1BmKKdEwsda",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3749",
"title": "Image Shape Manipulation from a Single Augmented Training Sample",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3749/1BmKNCO62iI",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a848",
"title": "Leveraging Off-the-shelf Diffusion Model for Multi-attribute Fashion Image Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a848/1L6LCFHWC6Q",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0540",
"title": "Attribute Manipulation Generative Adversarial Networks for Fashion Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0540/1hVloNEYY8w",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093416",
"title": "TailorGAN: Making User-Defined Fashion Designs",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093416/1jPbCu76ncY",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1IHotVZum6Q",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "9956007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IHq5bQd9BK",
"doi": "10.1109/ICPR56361.2022.9955634",
"title": "Hierarchical Segmentation of Human Manipulation Movements",
"normalizedTitle": "Hierarchical Segmentation of Human Manipulation Movements",
"abstract": "This paper introduces a segmentation algorithm which splits complex human manipulation movements into movement segments and automatically groups these into labeled actions. With this hierarchical algorithm the basic movement identities, which we call building blocks, as well as their concatenation to more complex actions can be identified in one handed as well as dual arm human manipulation movements. The algorithm can be used, e.g., in robotic applications such as imitation learning, in which human movement examples are directly used to generate robotic behavior. In this paper, we present two variants of the hierarchical segmentation algorithm, one supervised approach which requires a small number of pre-labeled movements as training data, as well as an approach which uses unsupervised algorithms to group building block segments which belong to the same movement. In both variants, the building block movements are detected based on the velocity of the hand(s), using the velocity-based multiple change-point inference algorithm. We evaluate both methods on human manipulation movements recorded from several participants with a marker-based motion tracking system. The first evaluations are done on simple one-handed point-to-point movements, followed by an evaluation on a complex dual arm manipulation task. The results show, that the presented approaches are able to identify basic movements as well as their concatenations into more complex, labeled actions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a segmentation algorithm which splits complex human manipulation movements into movement segments and automatically groups these into labeled actions. With this hierarchical algorithm the basic movement identities, which we call building blocks, as well as their concatenation to more complex actions can be identified in one handed as well as dual arm human manipulation movements. The algorithm can be used, e.g., in robotic applications such as imitation learning, in which human movement examples are directly used to generate robotic behavior. In this paper, we present two variants of the hierarchical segmentation algorithm, one supervised approach which requires a small number of pre-labeled movements as training data, as well as an approach which uses unsupervised algorithms to group building block segments which belong to the same movement. In both variants, the building block movements are detected based on the velocity of the hand(s), using the velocity-based multiple change-point inference algorithm. We evaluate both methods on human manipulation movements recorded from several participants with a marker-based motion tracking system. The first evaluations are done on simple one-handed point-to-point movements, followed by an evaluation on a complex dual arm manipulation task. The results show, that the presented approaches are able to identify basic movements as well as their concatenations into more complex, labeled actions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a segmentation algorithm which splits complex human manipulation movements into movement segments and automatically groups these into labeled actions. With this hierarchical algorithm the basic movement identities, which we call building blocks, as well as their concatenation to more complex actions can be identified in one handed as well as dual arm human manipulation movements. The algorithm can be used, e.g., in robotic applications such as imitation learning, in which human movement examples are directly used to generate robotic behavior. In this paper, we present two variants of the hierarchical segmentation algorithm, one supervised approach which requires a small number of pre-labeled movements as training data, as well as an approach which uses unsupervised algorithms to group building block segments which belong to the same movement. In both variants, the building block movements are detected based on the velocity of the hand(s), using the velocity-based multiple change-point inference algorithm. We evaluate both methods on human manipulation movements recorded from several participants with a marker-based motion tracking system. The first evaluations are done on simple one-handed point-to-point movements, followed by an evaluation on a complex dual arm manipulation task. The results show, that the presented approaches are able to identify basic movements as well as their concatenations into more complex, labeled actions.",
"fno": "09955634",
"keywords": [
"Image Motion Analysis",
"Image Segmentation",
"Inference Mechanisms",
"Learning Artificial Intelligence",
"Manipulators",
"Motion Control",
"Unsupervised Learning",
"Basic Movement Identities",
"Basic Movements",
"Building Block Movements",
"Complex Actions",
"Complex Dual Arm Manipulation Task",
"Complex Human Manipulation Movements",
"Dual Arm Human Manipulation Movements",
"Group Building Block Segments",
"Hierarchical Algorithm",
"Hierarchical Segmentation Algorithm",
"Human Movement Examples",
"Labeled Actions",
"Movement Segments",
"Pre Labeled Movements",
"Simple One Handed Point To Point Movements",
"Unsupervised Algorithms",
"Velocity Based Multiple Change Point Inference Algorithm",
"Training",
"Tracking",
"Motion Segmentation",
"Training Data",
"Inference Algorithms",
"Classification Algorithms",
"Pattern Recognition"
],
"authors": [
{
"affiliation": "University of Bremen,Robotics Group,Bremen,Germany",
"fullName": "Lisa Gutzeit",
"givenName": "Lisa",
"surname": "Gutzeit",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "2742-2748",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9062-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09956269",
"articleId": "1IHqDl8byKc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956049",
"articleId": "1IHq2AKQPZK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ca/2002/1594/0/15940103",
"title": "Extensive and Efficient Search of Human Movements with Hierarchical Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2002/15940103/12OmNBpmDJQ",
"parentPublication": {
"id": "proceedings/ca/2002/1594/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420624",
"title": "Recognition of human body motion using phase space constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420624/12OmNC9lEFj",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209e564",
"title": "Velocity-Based Multiple Change-Point Inference for Unsupervised Segmentation of Human Movement Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209e564/12OmNCbCrYd",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492759",
"title": "Precise and rapid interaction through scaled manipulation in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492759/12OmNqHItGo",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607432",
"title": "Human movement summarization and depiction from videos",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607432/12OmNrkT7xl",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2012/4725/0/4725a191",
"title": "Guidance and Movement Correction Based on Therapeutics Movements for Motor Rehabilitation Support Systems",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a191/12OmNwtWfRu",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2010/6846/0/05444707",
"title": "A framework for volume segmentation and visualization using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444707/12OmNyz5JSr",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161046",
"title": "Pointman - A New Control for Simulating Tactical Infantry Movements",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161046/12OmNzwHvkp",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040095",
"title": "Performance-Based Animation Using Constraints for Virtual Object Manipulation",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040095/13rRUxjyXcS",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2019/05/08713835",
"title": "Human Eye Movements Reveal Video Frame Importance",
"doi": null,
"abstractUrl": "/magazine/co/2019/05/08713835/1a31lyCQdiM",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1KxUhhFgzlK",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1KxUFsh4ZdS",
"doi": "10.1109/WACV56688.2023.00440",
"title": "Text and Image Guided 3D Avatar Generation and Manipulation",
"normalizedTitle": "Text and Image Guided 3D Avatar Generation and Manipulation",
"abstract": "The manipulation of latent space has recently become an interesting topic in the field of generative models. Recent research shows that latent directions can be used to manipulate images towards certain attributes. However, controlling the generation process of 3D generative models remains a challenge. In this work, we propose a novel 3D manipulation method that can manipulate both the shape and texture of the model using text or image-based prompts such as ’a young face’ or ’a surprised face’. We leverage the power of Contrastive Language-Image Pre-training (CLIP) model and a pre-trained 3D GAN model designed to generate face avatars and create a fully differentiable rendering pipeline to manipulate meshes. More specifically, our method takes an input latent code and modifies it such that the target attribute specified by a text or image prompt is present or enhanced while leaving other attributes largely unaffected. Our method requires only 5 minutes per manipulation, and we demonstrate the effectiveness of our approach with extensive results and comparisons.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The manipulation of latent space has recently become an interesting topic in the field of generative models. Recent research shows that latent directions can be used to manipulate images towards certain attributes. However, controlling the generation process of 3D generative models remains a challenge. In this work, we propose a novel 3D manipulation method that can manipulate both the shape and texture of the model using text or image-based prompts such as ’a young face’ or ’a surprised face’. We leverage the power of Contrastive Language-Image Pre-training (CLIP) model and a pre-trained 3D GAN model designed to generate face avatars and create a fully differentiable rendering pipeline to manipulate meshes. More specifically, our method takes an input latent code and modifies it such that the target attribute specified by a text or image prompt is present or enhanced while leaving other attributes largely unaffected. Our method requires only 5 minutes per manipulation, and we demonstrate the effectiveness of our approach with extensive results and comparisons.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The manipulation of latent space has recently become an interesting topic in the field of generative models. Recent research shows that latent directions can be used to manipulate images towards certain attributes. However, controlling the generation process of 3D generative models remains a challenge. In this work, we propose a novel 3D manipulation method that can manipulate both the shape and texture of the model using text or image-based prompts such as ’a young face’ or ’a surprised face’. We leverage the power of Contrastive Language-Image Pre-training (CLIP) model and a pre-trained 3D GAN model designed to generate face avatars and create a fully differentiable rendering pipeline to manipulate meshes. More specifically, our method takes an input latent code and modifies it such that the target attribute specified by a text or image prompt is present or enhanced while leaving other attributes largely unaffected. Our method requires only 5 minutes per manipulation, and we demonstrate the effectiveness of our approach with extensive results and comparisons.",
"fno": "934600e410",
"keywords": [
"Avatars",
"Face Recognition",
"Neural Nets",
"Rendering Computer Graphics",
"Solid Modelling",
"Face Avatars",
"Generation Process",
"Generative Models",
"Image Based Prompts",
"Input Latent Code",
"Latent Directions",
"Latent Space",
"Pre Trained 3 D GAN Model",
"Pretrained 3 D GAN Model",
"Surprised Face",
"Time 5 0 Min",
"Young Face",
"Solid Modeling",
"Three Dimensional Displays",
"Shape",
"Avatars",
"Source Coding",
"Pipelines",
"Process Control",
"Algorithms 3 D Computer Vision",
"Biometrics",
"Face",
"Gesture",
"Body Pose"
],
"authors": [
{
"affiliation": "Boğaziçi University,Istanbul,Turkey",
"fullName": "Zehranaz Canfes",
"givenName": "Zehranaz",
"surname": "Canfes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Boğaziçi University,Istanbul,Turkey",
"fullName": "M. Furkan Atasoy",
"givenName": "M. Furkan",
"surname": "Atasoy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Boğaziçi University,Istanbul,Turkey",
"fullName": "Alara Dirik",
"givenName": "Alara",
"surname": "Dirik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Boğaziçi University,Istanbul,Turkey",
"fullName": "Pinar Yanardag",
"givenName": "Pinar",
"surname": "Yanardag",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-01-01T00:00:00",
"pubType": "proceedings",
"pages": "4410-4420",
"year": "2023",
"issn": null,
"isbn": "978-1-6654-9346-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1KxUFoR2D3G",
"name": "pwacv202393460-010030861s1-mm_934600e410.zip",
"size": "678 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pwacv202393460-010030861s1-mm_934600e410.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "934600e399",
"articleId": "1L8qxHGiBPy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "934600e421",
"articleId": "1L8qC9qbtRe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2008/2570/0/04607657",
"title": "Real-time conversion from a single 2D face image to a 3D text-driven emotive audio-visual avatar",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607657/12OmNx3HI5p",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446445",
"title": "A Framework for Virtual 3D Manipulation of Face in Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446445/13bd1AITnaH",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d441",
"title": "StyleMC: Multi-Channel Based Fast Text-Guided Image Generation and Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d441/1B12HcraGYM",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c065",
"title": "StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c065/1BmKkMzKY1i",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b053",
"title": "Interactive Image Manipulation with Complex Text Instructions",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b053/1KxVAekpq9O",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09157962",
"title": "Facial Expression Retargeting From Human to Avatar Made Easy",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09157962/1m1eKuAoOoE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f548",
"title": "MaskGAN: Towards Diverse and Interactive Facial Image Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f548/1m3ofjibVF6",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09444850",
"title": "Text-Guided Human Image Manipulation via Image-Text Shared Space",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09444850/1u51vKl5cSk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c256",
"title": "TediGAN: Text-Guided Diverse Face Image Generation and Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c256/1yeKSL9mS2I",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09668999",
"title": "Cross-Domain and Disentangled Face Manipulation With 3D Guidance",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09668999/1zTfZzq1wqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeKXyjY3Wo",
"doi": "10.1109/CVPR46437.2021.00447",
"title": "ManipulaTHOR: A Framework for Visual Object Manipulation",
"normalizedTitle": "ManipulaTHOR: A Framework for Visual Object Manipulation",
"abstract": "The domain of Embodied AI has recently witnessed substantial progress, particularly in navigating agents within their environments. These early successes have laid the building blocks for the community to tackle tasks that require agents to actively interact with objects in their environment. Object manipulation is an established research domain within the robotics community and poses several challenges including manipulator motion, grasping and long-horizon planning, particularly when dealing with oft-overlooked practical setups involving visually rich and complex scenes, manipulation using mobile agents (as opposed to tabletop manipulation), and generalization to unseen environments and objects. We propose a framework for object manipulation built upon the physics-enabled, visually rich AI2-THOR framework and present a new challenge to the Embodied AI community known as ArmPointNav. This task extends the popular point navigation task [2] to object manipulation and offers new challenges including 3D obstacle avoidance, manipulating objects in the presence of occlusion, and multi-object manipulation that necessitates long term planning. Popular learning paradigms that are successful on PointNav challenges show promise, but leave a large room for improvement.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The domain of Embodied AI has recently witnessed substantial progress, particularly in navigating agents within their environments. These early successes have laid the building blocks for the community to tackle tasks that require agents to actively interact with objects in their environment. Object manipulation is an established research domain within the robotics community and poses several challenges including manipulator motion, grasping and long-horizon planning, particularly when dealing with oft-overlooked practical setups involving visually rich and complex scenes, manipulation using mobile agents (as opposed to tabletop manipulation), and generalization to unseen environments and objects. We propose a framework for object manipulation built upon the physics-enabled, visually rich AI2-THOR framework and present a new challenge to the Embodied AI community known as ArmPointNav. This task extends the popular point navigation task [2] to object manipulation and offers new challenges including 3D obstacle avoidance, manipulating objects in the presence of occlusion, and multi-object manipulation that necessitates long term planning. Popular learning paradigms that are successful on PointNav challenges show promise, but leave a large room for improvement.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The domain of Embodied AI has recently witnessed substantial progress, particularly in navigating agents within their environments. These early successes have laid the building blocks for the community to tackle tasks that require agents to actively interact with objects in their environment. Object manipulation is an established research domain within the robotics community and poses several challenges including manipulator motion, grasping and long-horizon planning, particularly when dealing with oft-overlooked practical setups involving visually rich and complex scenes, manipulation using mobile agents (as opposed to tabletop manipulation), and generalization to unseen environments and objects. We propose a framework for object manipulation built upon the physics-enabled, visually rich AI2-THOR framework and present a new challenge to the Embodied AI community known as ArmPointNav. This task extends the popular point navigation task [2] to object manipulation and offers new challenges including 3D obstacle avoidance, manipulating objects in the presence of occlusion, and multi-object manipulation that necessitates long term planning. Popular learning paradigms that are successful on PointNav challenges show promise, but leave a large room for improvement.",
"fno": "450900e495",
"keywords": [
"Collision Avoidance",
"Control Engineering Computing",
"Learning Artificial Intelligence",
"Manipulators",
"Mobile Robots",
"Path Planning",
"Robot Vision",
"Visual Object Manipulation",
"Robotics Community",
"Manipulator Motion",
"Complex Scenes",
"Mobile Agents",
"Tabletop Manipulation",
"AI 2 THOR Framework",
"Multiobject Manipulation",
"Manipula THOR",
"Grasping",
"Embodied AI Community",
"Point Navigation Task",
"3 D Obstacle Avoidance",
"Arm Point Nav",
"Visualization",
"Three Dimensional Displays",
"Navigation",
"Mobile Agents",
"Grasping",
"Manipulators",
"Planning"
],
"authors": [
{
"affiliation": "Allen Institute for AI",
"fullName": "Kiana Ehsani",
"givenName": "Kiana",
"surname": "Ehsani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Allen Institute for AI",
"fullName": "Winson Han",
"givenName": "Winson",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Allen Institute for AI",
"fullName": "Alvaro Herrasti",
"givenName": "Alvaro",
"surname": "Herrasti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Allen Institute for AI",
"fullName": "Eli VanderBilt",
"givenName": "Eli",
"surname": "VanderBilt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Allen Institute for AI",
"fullName": "Luca Weihs",
"givenName": "Luca",
"surname": "Weihs",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Allen Institute for AI",
"fullName": "Eric Kolve",
"givenName": "Eric",
"surname": "Kolve",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Allen Institute for AI",
"fullName": "Aniruddha Kembhavi",
"givenName": "Aniruddha",
"surname": "Kembhavi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Allen Institute for AI",
"fullName": "Roozbeh Mottaghi",
"givenName": "Roozbeh",
"surname": "Mottaghi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4495-4504",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeKXsQtOne",
"name": "pcvpr202145090-09578091s1-mm_450900e495.zip",
"size": "8.24 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578091s1-mm_450900e495.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900e484",
"articleId": "1yeJP2Jrp6M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900e505",
"articleId": "1yeI1z8OtvW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1989/1938/0/00100100",
"title": "Finger force computation for manipulation of an object by a multifingered robot hand",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100100/12OmNAoDhTb",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isatp/2003/7770/0/01217208",
"title": "Passing manipulation by 1 degree-of-freedom manipulator - catching manipulation of tossed object without impact",
"doi": null,
"abstractUrl": "/proceedings-article/isatp/2003/01217208/12OmNCbU2PO",
"parentPublication": {
"id": "proceedings/isatp/2003/7770/0",
"title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00219925",
"title": "Experiments in dual-arm manipulation planning",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00219925/12OmNrMZpI9",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012029",
"title": "Chopstick manipulation with an articulated hand-a qualitative analysis",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012029/12OmNs4S8BA",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131782",
"title": "A framework for planning dexterous manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131782/12OmNvo67Fd",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f208",
"title": "Towards Disturbance-Free Visual Mobile Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f208/1KxUJtL4khy",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a195",
"title": "Tangi: Tangible Proxies For Embodied Object Exploration And Manipulation In Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a195/1pystVP5LFK",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a060",
"title": "VR Collaborative Object Manipulation Based on Viewpoint Quality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a060/1yeCT7VcdEc",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j863",
"title": "Pushing it out of the Way: Interactive Visual Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j863/1yeJ7xA5QB2",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f918",
"title": "Visual Room Rearrangement",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f918/1yeM1tGD8IM",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwMXnuY",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"acronym": "culture-computing",
"groupId": "1800597",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAQJzMW",
"doi": "10.1109/CultureComputing.2013.42",
"title": "Affective Music Recommendation System Reflecting the Mood of Input Image",
"normalizedTitle": "Affective Music Recommendation System Reflecting the Mood of Input Image",
"abstract": "We present an affective music recommendation system using input images without textual information. Music that matches our current mood can create a deep impression. However, we do not know which music best matches our present mood. As it is difficult to select music manually, we need a recommendation system that can operate affectively. In this paper, we assume that there exists a relationship between our mood and images because visual information affects our mood when we listen to music. Our system matches an input image with music using valence-arousal plane which is an emotional plane.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an affective music recommendation system using input images without textual information. Music that matches our current mood can create a deep impression. However, we do not know which music best matches our present mood. As it is difficult to select music manually, we need a recommendation system that can operate affectively. In this paper, we assume that there exists a relationship between our mood and images because visual information affects our mood when we listen to music. Our system matches an input image with music using valence-arousal plane which is an emotional plane.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an affective music recommendation system using input images without textual information. Music that matches our current mood can create a deep impression. However, we do not know which music best matches our present mood. As it is difficult to select music manually, we need a recommendation system that can operate affectively. In this paper, we assume that there exists a relationship between our mood and images because visual information affects our mood when we listen to music. Our system matches an input image with music using valence-arousal plane which is an emotional plane.",
"fno": "5047a153",
"keywords": [
"Mood",
"Music",
"Databases",
"Recommender Systems",
"Image Color Analysis",
"Equations",
"Images And Sounds",
"Music Recommendation",
"Image Processing",
"Valence",
"Arousal"
],
"authors": [
{
"affiliation": "Waseda Univ., Tokyo, Japan",
"fullName": "Shoto Sasaki",
"givenName": "Shoto",
"surname": "Sasaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Waseda Univ., Tokyo, Japan",
"fullName": "Tatsunori Hirai",
"givenName": "Tatsunori",
"surname": "Hirai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Waseda Univ., Tokyo, Japan",
"fullName": "Hayato Ohya",
"givenName": "Hayato",
"surname": "Ohya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Waseda Univ., Tokyo, Japan",
"fullName": "Shigeo Morishima",
"givenName": "Shigeo",
"surname": "Morishima",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "culture-computing",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-09-01T00:00:00",
"pubType": "proceedings",
"pages": "153-154",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5047-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5047a151",
"articleId": "12OmNzayNan",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5047a155",
"articleId": "12OmNB0nWdt",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2017/6067/0/08019341",
"title": "Automatic music mood classification by learning cross-media relevance between audio and lyrics",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019341/12OmNARRYkx",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ncm/2009/3769/0/3769b485",
"title": "Acquiring Mood Information from Songs in Large Music Database",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b485/12OmNB836TX",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloudcom/2015/9560/0/9560a475",
"title": "A Novel Cloud-Based Crowd Sensing Approach to Context-Aware Music Mood-Mapping for Drivers",
"doi": null,
"abstractUrl": "/proceedings-article/cloudcom/2015/9560a475/12OmNBIWXC4",
"parentPublication": {
"id": "proceedings/cloudcom/2015/9560/0",
"title": "2015 IEEE 7th International Conference on Cloud Computing Technology and Science (CloudCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciids/2009/3580/0/3580a167",
"title": "A Similar Music Retrieval Scheme Based on Musical Mood Variation",
"doi": null,
"abstractUrl": "/proceedings-article/aciids/2009/3580a167/12OmNBkxsv6",
"parentPublication": {
"id": "proceedings/aciids/2009/3580/0",
"title": "Intelligent Information and Database Systems, Asian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2012/4730/0/4730a007",
"title": "Automatic Mood Classification Model for Indian Popular Music",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2012/4730a007/12OmNqAU6DC",
"parentPublication": {
"id": "proceedings/ams/2012/4730/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2012/4771/0/4771a143",
"title": "Using Animated Mood Pictures in Music Recommendation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2012/4771a143/12OmNvAiSHL",
"parentPublication": {
"id": "proceedings/iv/2012/4771/0",
"title": "2012 16th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349591",
"title": "The power of words: Enhancing music mood estimation with textual input of lyrics",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349591/12OmNvjyxRQ",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012116",
"title": "Smoodi: Mood-based music recommendation player",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012116/12OmNyS6RHE",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icds/2009/3526/0/3526a304",
"title": "Music Ontology for Mood and Situation Reasoning to Support Music Retrieval and Recommendation",
"doi": null,
"abstractUrl": "/proceedings-article/icds/2009/3526a304/12OmNzmtWzm",
"parentPublication": {
"id": "proceedings/icds/2009/3526/0",
"title": "International Conference on the Digital Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2013/01/tta2013010057",
"title": "Directing Physiology and Mood through Music: Validation of an Affective Music Player",
"doi": null,
"abstractUrl": "/journal/ta/2013/01/tta2013010057/13rRUxAASZo",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBbaH9O",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"acronym": "ism",
"groupId": "1001094",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAYGlwT",
"doi": "10.1109/ISM.2017.10",
"title": "Computational and Perceptual Determinants of Film Mood in Different Types of Scenes",
"normalizedTitle": "Computational and Perceptual Determinants of Film Mood in Different Types of Scenes",
"abstract": "Films seek to elicit emotions in viewers by infusing the story they tell with an affective character or tone - in a word, a mood. In content-based multimedia analysis, considerable effort has been made to develop methods to estimate film affect computationally. However, results have been hampered by a tendency to classify film scenes either by genre or not at all, while other potentially helpful classification methods have been neglected. In this study, we investigated the quantitative determinants of film mood across different types of scenes. We first collected style and mood ratings for 50 film scenes, which we classified by their location, time of day, and their use of dialogue and music. We then investigated whether the viewers rated the mood (in terms of hedonic tone, energetic arousal, and tense arousal) of various scene types differently, and how well perceptual stylistic attributes as well as low- and high-level computational features correlated with the mood ratings. We found that the mood ratings and their quantitative determinants differed across the scene types. We also found that the energetic arousal ratings were associated with the stylistic attributes and their corresponding low-level features, while hedonic tone and tense arousal were associated with high-level features related to the emotional expression in faces, dialogue, and music. The study contributes to ongoing efforts to estimate film affect computationally in showing that results can be improved by utilizing both low- and high-level features and by considering different scene types separately.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Films seek to elicit emotions in viewers by infusing the story they tell with an affective character or tone - in a word, a mood. In content-based multimedia analysis, considerable effort has been made to develop methods to estimate film affect computationally. However, results have been hampered by a tendency to classify film scenes either by genre or not at all, while other potentially helpful classification methods have been neglected. In this study, we investigated the quantitative determinants of film mood across different types of scenes. We first collected style and mood ratings for 50 film scenes, which we classified by their location, time of day, and their use of dialogue and music. We then investigated whether the viewers rated the mood (in terms of hedonic tone, energetic arousal, and tense arousal) of various scene types differently, and how well perceptual stylistic attributes as well as low- and high-level computational features correlated with the mood ratings. We found that the mood ratings and their quantitative determinants differed across the scene types. We also found that the energetic arousal ratings were associated with the stylistic attributes and their corresponding low-level features, while hedonic tone and tense arousal were associated with high-level features related to the emotional expression in faces, dialogue, and music. The study contributes to ongoing efforts to estimate film affect computationally in showing that results can be improved by utilizing both low- and high-level features and by considering different scene types separately.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Films seek to elicit emotions in viewers by infusing the story they tell with an affective character or tone - in a word, a mood. In content-based multimedia analysis, considerable effort has been made to develop methods to estimate film affect computationally. However, results have been hampered by a tendency to classify film scenes either by genre or not at all, while other potentially helpful classification methods have been neglected. In this study, we investigated the quantitative determinants of film mood across different types of scenes. We first collected style and mood ratings for 50 film scenes, which we classified by their location, time of day, and their use of dialogue and music. We then investigated whether the viewers rated the mood (in terms of hedonic tone, energetic arousal, and tense arousal) of various scene types differently, and how well perceptual stylistic attributes as well as low- and high-level computational features correlated with the mood ratings. We found that the mood ratings and their quantitative determinants differed across the scene types. We also found that the energetic arousal ratings were associated with the stylistic attributes and their corresponding low-level features, while hedonic tone and tense arousal were associated with high-level features related to the emotional expression in faces, dialogue, and music. The study contributes to ongoing efforts to estimate film affect computationally in showing that results can be improved by utilizing both low- and high-level features and by considering different scene types separately.",
"fno": "2937a185",
"keywords": [
"Mood",
"Music",
"Estimation",
"Visualization",
"Motion Pictures",
"Image Color Analysis",
"Film",
"Affect",
"Mood",
"Style",
"Content Based Analysis"
],
"authors": [
{
"affiliation": null,
"fullName": "Jussi Tarvainen",
"givenName": "Jussi",
"surname": "Tarvainen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jorma Laaksonen",
"givenName": "Jorma",
"surname": "Laaksonen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tapio Takala",
"givenName": "Tapio",
"surname": "Takala",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ism",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-12-01T00:00:00",
"pubType": "proceedings",
"pages": "185-192",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2937-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2937a177",
"articleId": "12OmNyvoX91",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2937a193",
"articleId": "12OmNrJiCUO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/culture-computing/2013/5047/0/5047a153",
"title": "Affective Music Recommendation System Reflecting the Mood of Input Image",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2013/5047a153/12OmNAQJzMW",
"parentPublication": {
"id": "proceedings/culture-computing/2013/5047/0",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ncm/2009/3769/0/3769b485",
"title": "Acquiring Mood Information from Songs in Large Music Database",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b485/12OmNB836TX",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a726",
"title": "Mood Conductor: Emotion-Driven Interactive Music Performance",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a726/12OmNvIxeWS",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349591",
"title": "The power of words: Enhancing music mood estimation with textual input of lyrics",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349591/12OmNvjyxRQ",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012116",
"title": "Smoodi: Mood-based music recommendation player",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012116/12OmNyS6RHE",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/2/01394329",
"title": "Color-mood analysis of films based on syntactic and psychological models",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394329/12OmNz5s0E4",
"parentPublication": {
"id": "proceedings/icme/2004/8603/2",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2013/1604/0/06618436",
"title": "Semantic models of musical mood: Comparison between crowd-sourced and curated editorial tags",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2013/06618436/12OmNzlD9si",
"parentPublication": {
"id": "proceedings/icmew/2013/1604/0",
"title": "2013 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2020/02/08252762",
"title": "Film Mood and Its Quantitative Determinants in Different Types of Scenes",
"doi": null,
"abstractUrl": "/journal/ta/2020/02/08252762/13rRUwhpBCr",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2016/02/07173419",
"title": "Genre-Adaptive Semantic Computing and Audio-Based Modelling for Music Mood Annotation",
"doi": null,
"abstractUrl": "/journal/ta/2016/02/07173419/13rRUxNEqOa",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/02/08822965",
"title": "On the Influence of Shot Scale on Film Mood and Narrative Engagement in Film Viewers",
"doi": null,
"abstractUrl": "/journal/ta/2022/02/08822965/1d1yNMygICY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAR1b0Z",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC2fGso",
"doi": "10.1109/CVPRW.2017.283",
"title": "DeepSpace: Mood-Based Image Texture Generation for Virtual Reality from Music",
"normalizedTitle": "DeepSpace: Mood-Based Image Texture Generation for Virtual Reality from Music",
"abstract": "Affective virtual spaces are of interest for many VR applications in areas of wellbeing, art, education, and entertainment. Creating content for virtual environments is a laborious task involving multiple skills like 3D modeling, texturing, animation, lighting, and programming. One way to facilitate content creation is to automate sub-processes like assignment of textures and materials within virtual environments. To this end, we introduce the DeepSpace approach that automatically creates and applies image textures to objects in procedurally created 3D scenes. The main novelty of our DeepSpace approach is that it uses music to automatically create kaleidoscopic textures for virtual environments designed to elicit emotional responses in users. Specifically, DeepSpace exploits the modeling power of deep neural networks, which have shown great performance in image generation tasks, to achieve mood-based image generation. Our study results indicate the virtual environments created by DeepSpace elicit positive emotions and achieve high presence scores.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Affective virtual spaces are of interest for many VR applications in areas of wellbeing, art, education, and entertainment. Creating content for virtual environments is a laborious task involving multiple skills like 3D modeling, texturing, animation, lighting, and programming. One way to facilitate content creation is to automate sub-processes like assignment of textures and materials within virtual environments. To this end, we introduce the DeepSpace approach that automatically creates and applies image textures to objects in procedurally created 3D scenes. The main novelty of our DeepSpace approach is that it uses music to automatically create kaleidoscopic textures for virtual environments designed to elicit emotional responses in users. Specifically, DeepSpace exploits the modeling power of deep neural networks, which have shown great performance in image generation tasks, to achieve mood-based image generation. Our study results indicate the virtual environments created by DeepSpace elicit positive emotions and achieve high presence scores.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Affective virtual spaces are of interest for many VR applications in areas of wellbeing, art, education, and entertainment. Creating content for virtual environments is a laborious task involving multiple skills like 3D modeling, texturing, animation, lighting, and programming. One way to facilitate content creation is to automate sub-processes like assignment of textures and materials within virtual environments. To this end, we introduce the DeepSpace approach that automatically creates and applies image textures to objects in procedurally created 3D scenes. The main novelty of our DeepSpace approach is that it uses music to automatically create kaleidoscopic textures for virtual environments designed to elicit emotional responses in users. Specifically, DeepSpace exploits the modeling power of deep neural networks, which have shown great performance in image generation tasks, to achieve mood-based image generation. Our study results indicate the virtual environments created by DeepSpace elicit positive emotions and achieve high presence scores.",
"fno": "0733c289",
"keywords": [
"Mood",
"Solid Modeling",
"Three Dimensional Displays",
"Image Generation",
"Neural Networks",
"Training",
"Virtual Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Misha Sra",
"givenName": "Misha",
"surname": "Sra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Prashanth Vijayaraghavan",
"givenName": "Prashanth",
"surname": "Vijayaraghavan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ognjen Rudovic",
"givenName": "Ognjen",
"surname": "Rudovic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pattie Maes",
"givenName": "Pattie",
"surname": "Maes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Deb Roy",
"givenName": "Deb",
"surname": "Roy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "2289-2298",
"year": "2017",
"issn": "2160-7516",
"isbn": "978-1-5386-0733-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0733c278",
"articleId": "12OmNxE2mWZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0733c299",
"articleId": "12OmNzaQoPr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892385",
"title": "Rapid creation of photorealistic virtual reality content with consumer depth cameras",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892385/12OmNqHqSmO",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2013/5261/0/06684874",
"title": "Virtual Learning Environments in engineering and STEM education",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2013/06684874/12OmNxuFBpQ",
"parentPublication": {
"id": "proceedings/fie/2013/5261/0",
"title": "2013 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmcs/1997/7819/0/00609772",
"title": "Toward next generation virtual reality systems",
"doi": null,
"abstractUrl": "/proceedings-article/mmcs/1997/00609772/12OmNylboJ3",
"parentPublication": {
"id": "proceedings/mmcs/1997/7819/0",
"title": "Proceedings of IEEE International Conference on Multimedia Computing and Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948452",
"title": "[Poster] Representing degradation of real objects using augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948452/12OmNzgeLHy",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446410",
"title": "Virtual Content Creation Using Dynamic Omnidirectional Texture Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446410/13bd1gzWkRf",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a442",
"title": "A Pilot Study: VR and Binaural Sounds for Mood Management",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a442/17D45Xh13sX",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a657",
"title": "Mixed Reality for Engineering Design Review Using Finite Element Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a657/1J7WwCL6CCQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797768",
"title": "Towards a Framework for Composition Design for Music-Led Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797768/1cJ0QaC6GyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798166",
"title": "Collaborative and Competitive Futures for Virtual Reality Music and Sound",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798166/1cJ1c6vbhe0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a460",
"title": "MAXIM: Mixed-reality Automotive Driving XIMulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a460/1gyslTTdbqw",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzBOhX1",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvIxeWS",
"doi": "10.1109/ACII.2013.165",
"title": "Mood Conductor: Emotion-Driven Interactive Music Performance",
"normalizedTitle": "Mood Conductor: Emotion-Driven Interactive Music Performance",
"abstract": "Mood Conductor is a system that allows the audience to interact with stage performers to create directed improvisations. The term \"conductor\" is used metaphorically. Rather than directing a musical performance by way of visible gestures, spectators act as conductors by communicating emotional intentions to the performers through our web-based smartphone-friendly Mood Conductor app. Performers receive the audience's directions via a visual feedback system operating in real-time. Emotions are represented by coloured blobs in a two-dimensional space (vertical dimension: arousal or excitation; horizontal dimension: valence or pleasantness). The size of the \"emotion blobs\" indicates the number of spectators that have selected the corresponding emotions at a given time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mood Conductor is a system that allows the audience to interact with stage performers to create directed improvisations. The term \"conductor\" is used metaphorically. Rather than directing a musical performance by way of visible gestures, spectators act as conductors by communicating emotional intentions to the performers through our web-based smartphone-friendly Mood Conductor app. Performers receive the audience's directions via a visual feedback system operating in real-time. Emotions are represented by coloured blobs in a two-dimensional space (vertical dimension: arousal or excitation; horizontal dimension: valence or pleasantness). The size of the \"emotion blobs\" indicates the number of spectators that have selected the corresponding emotions at a given time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mood Conductor is a system that allows the audience to interact with stage performers to create directed improvisations. The term \"conductor\" is used metaphorically. Rather than directing a musical performance by way of visible gestures, spectators act as conductors by communicating emotional intentions to the performers through our web-based smartphone-friendly Mood Conductor app. Performers receive the audience's directions via a visual feedback system operating in real-time. Emotions are represented by coloured blobs in a two-dimensional space (vertical dimension: arousal or excitation; horizontal dimension: valence or pleasantness). The size of the \"emotion blobs\" indicates the number of spectators that have selected the corresponding emotions at a given time.",
"fno": "5048a726",
"keywords": [
"Mood",
"Conductors",
"Music",
"Educational Institutions",
"Computational Modeling",
"Visualization",
"Real Time Systems"
],
"authors": [
{
"affiliation": "Sch. of Electron. Eng. & Comput. Sci., Queen Mary Univ. of London, London, UK",
"fullName": "Gyorgy Fazekas",
"givenName": "Gyorgy",
"surname": "Fazekas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Electron. Eng. & Comput. Sci., Queen Mary Univ. of London, London, UK",
"fullName": "Mathieu Barthet",
"givenName": "Mathieu",
"surname": "Barthet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Electron. Eng. & Comput. Sci., Queen Mary Univ. of London, London, UK",
"fullName": "Mark B. Sandler",
"givenName": "Mark B.",
"surname": "Sandler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-09-01T00:00:00",
"pubType": "proceedings",
"pages": "726-726",
"year": "2013",
"issn": "2156-8103",
"isbn": "978-0-7695-5048-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5048a724",
"articleId": "12OmNzahbSm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5048a727",
"articleId": "12OmNB0FxiT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/culture-computing/2013/5047/0/5047a153",
"title": "Affective Music Recommendation System Reflecting the Mood of Input Image",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2013/5047a153/12OmNAQJzMW",
"parentPublication": {
"id": "proceedings/culture-computing/2013/5047/0",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ncm/2009/3769/0/3769b485",
"title": "Acquiring Mood Information from Songs in Large Music Database",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b485/12OmNB836TX",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciids/2009/3580/0/3580a167",
"title": "A Similar Music Retrieval Scheme Based on Musical Mood Variation",
"doi": null,
"abstractUrl": "/proceedings-article/aciids/2009/3580a167/12OmNBkxsv6",
"parentPublication": {
"id": "proceedings/aciids/2009/3580/0",
"title": "Intelligent Information and Database Systems, Asian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2014/5569/0/06970232",
"title": "Mood metadata for video games and interactive media",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2014/06970232/12OmNCzsKGA",
"parentPublication": {
"id": "proceedings/jcdl/2014/5569/0",
"title": "2014 IEEE/ACM Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2012/4730/0/4730a007",
"title": "Automatic Mood Classification Model for Indian Popular Music",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2012/4730a007/12OmNqAU6DC",
"parentPublication": {
"id": "proceedings/ams/2012/4730/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmla/2008/3495/0/3495a688",
"title": "Multimodal Music Mood Classification Using Audio and Lyrics",
"doi": null,
"abstractUrl": "/proceedings-article/icmla/2008/3495a688/12OmNvkGWa0",
"parentPublication": {
"id": "proceedings/icmla/2008/3495/0",
"title": "2008 Seventh International Conference on Machine Learning and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012116",
"title": "Smoodi: Mood-based music recommendation player",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012116/12OmNyS6RHE",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbmi/2009/3662/0/3662a156",
"title": "Music Mood Annotator Design and Integration",
"doi": null,
"abstractUrl": "/proceedings-article/cbmi/2009/3662a156/12OmNzICEL3",
"parentPublication": {
"id": "proceedings/cbmi/2009/3662/0",
"title": "Content-Based Multimedia Indexing, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2013/0602/0/06579495",
"title": "Music Mood Classification Using Intro and Refrain Parts of Lyrics",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579495/12OmNzxyiAG",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fit/2018/9355/0/935500a224",
"title": "Integration of Speech/ Music Discrimination and Mood Classification with Audio Feature Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2018/935500a224/17D45XacGjJ",
"parentPublication": {
"id": "proceedings/fit/2018/9355/0",
"title": "2018 International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvzJG4b",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "2",
"displayVolume": "2",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz5s0E4",
"doi": "10.1109/ICME.2004.1394329",
"title": "Color-mood analysis of films based on syntactic and psychological models",
"normalizedTitle": "Color-mood analysis of films based on syntactic and psychological models",
"abstract": "The emergence of peer-to-peer networking and the increase of home PC storage capacity are necessitating efficient scaleable methods for video clustering, recommending and browsing. Based on film theories and psychological models, color-mood is an important factor affecting user emotional preferences. We propose a compact set of features for color-mood analysis and subgenre discrimination. We introduce two color representations for scenes and full films in order to extract the essential moods from the films: a global measure for the color palette and a discriminative measure for the transitions of the moods in the movie. We captured the dominant color ratio and the pace of the movie. Despite the simplicity and efficiency of the features, the classification accuracy was surprisingly good, about 80%, possibly thanks to the prevalence of the color-mood association in feature films",
"abstracts": [
{
"abstractType": "Regular",
"content": "The emergence of peer-to-peer networking and the increase of home PC storage capacity are necessitating efficient scaleable methods for video clustering, recommending and browsing. Based on film theories and psychological models, color-mood is an important factor affecting user emotional preferences. We propose a compact set of features for color-mood analysis and subgenre discrimination. We introduce two color representations for scenes and full films in order to extract the essential moods from the films: a global measure for the color palette and a discriminative measure for the transitions of the moods in the movie. We captured the dominant color ratio and the pace of the movie. Despite the simplicity and efficiency of the features, the classification accuracy was surprisingly good, about 80%, possibly thanks to the prevalence of the color-mood association in feature films",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The emergence of peer-to-peer networking and the increase of home PC storage capacity are necessitating efficient scaleable methods for video clustering, recommending and browsing. Based on film theories and psychological models, color-mood is an important factor affecting user emotional preferences. We propose a compact set of features for color-mood analysis and subgenre discrimination. We introduce two color representations for scenes and full films in order to extract the essential moods from the films: a global measure for the color palette and a discriminative measure for the transitions of the moods in the movie. We captured the dominant color ratio and the pace of the movie. Despite the simplicity and efficiency of the features, the classification accuracy was surprisingly good, about 80%, possibly thanks to the prevalence of the color-mood association in feature films",
"fno": "01394329",
"keywords": [
"Entertainment",
"Image Colour Analysis",
"Pattern Classification",
"Psychology",
"Video Signal Processing",
"Color Mood Analysis",
"Syntactic Models",
"Psychological Models",
"Peer To Peer Networking",
"Home PC Storage Capacity",
"Video Clustering",
"Video Recommending",
"Video Browsing",
"Film Theories",
"Emotional Preferences",
"Subgenre Discrimination",
"Color Representations",
"Color Palette",
"Dominant Color Ratio",
"Film Entertainment",
"Psychology",
"Mood",
"Color",
"Layout",
"Motion Pictures",
"Histograms",
"Support Vector Machines",
"Support Vector Machine Classification",
"Peer To Peer Computing",
"Art"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng., Columbia Univ., New York, NY, USA",
"fullName": "Cheng-Yu Wei",
"givenName": null,
"surname": "Cheng-Yu Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "N. Dimitrova",
"givenName": "N.",
"surname": "Dimitrova",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shih-Fu Chang",
"givenName": null,
"surname": "Shih-Fu Chang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-01-01T00:00:00",
"pubType": "proceedings",
"pages": "831,832,833,834",
"year": "2004",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01394328",
"articleId": "12OmNzvQHRi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01394330",
"articleId": "12OmNAm4TFS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2017/2937/0/2937a185",
"title": "Computational and Perceptual Determinants of Film Mood in Different Types of Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a185/12OmNAYGlwT",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/collaboratecom/2011/0683/0/06144812",
"title": "Supporting mood awareness in collaborative settings",
"doi": null,
"abstractUrl": "/proceedings-article/collaboratecom/2011/06144812/12OmNBVrjor",
"parentPublication": {
"id": "proceedings/collaboratecom/2011/0683/0",
"title": "7th International Conference on Collaborative Computing: Networking, Applications and Worksharing (CollaborateCom 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2014/5569/0/06970232",
"title": "Mood metadata for video games and interactive media",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2014/06970232/12OmNCzsKGA",
"parentPublication": {
"id": "proceedings/jcdl/2014/5569/0",
"title": "2014 IEEE/ACM Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2012/4730/0/4730a007",
"title": "Automatic Mood Classification Model for Indian Popular Music",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2012/4730a007/12OmNqAU6DC",
"parentPublication": {
"id": "proceedings/ams/2012/4730/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icacc/2012/4723/0/4723a078",
"title": "Domain Specific Sentence Level Mood Extraction from Malayalam Text",
"doi": null,
"abstractUrl": "/proceedings-article/icacc/2012/4723a078/12OmNqFa5oV",
"parentPublication": {
"id": "proceedings/icacc/2012/4723/0",
"title": "2012 International Conference on Advances in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kse/2009/3846/0/3846a144",
"title": "Machine Learning Approaches for Mood Classification of Songs toward Music Search Engine",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2009/3846a144/12OmNqIQS5H",
"parentPublication": {
"id": "proceedings/kse/2009/3846/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349588",
"title": "Does the mood matter?",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349588/12OmNvonIGU",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsip/2014/5100/0/5100a359",
"title": "An Efficient Classification Algorithm for Music Mood Detection in Western and Hindi Music Using Audio Feature Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icsip/2014/5100a359/12OmNyRPgBk",
"parentPublication": {
"id": "proceedings/icsip/2014/5100/0",
"title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a151",
"title": "Color-Mood-Aware Clothing Re-texturing",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a151/12OmNyRxFnV",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/transai/2021/3412/0/341200a112",
"title": "Interpreting Keystrokes to Ascertain Human Mood",
"doi": null,
"abstractUrl": "/proceedings-article/transai/2021/341200a112/1xNNAfSVc6A",
"parentPublication": {
"id": "proceedings/transai/2021/3412/0",
"title": "2021 Third International Conference on Transdisciplinary AI (TransAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKir9",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Xh13sX",
"doi": "10.1109/iV.2018.00083",
"title": "A Pilot Study: VR and Binaural Sounds for Mood Management",
"normalizedTitle": "A Pilot Study: VR and Binaural Sounds for Mood Management",
"abstract": "Virtual Reality is defined as the implementation of a virtual world that the user perceives as the real one. This can lead having the physical feeling of teleportation into another environment, forgetting the real world and even the physical body. This sensation of immersion affects the stimulus (visual, acoustic and haptic) perceived by the user and it is able to modify the brainwaves power. We think that this can be profitable for pain relief, as the patient feels many synchronized stimulus and he/she needs to be concentrated to process all the information and attenuate the pain sensation or change the initial mood. For that reason, this work proposes a pilot study of a VR environment combined with binaural beats, colors and movements to evaluate the perception the user has. It is believed that the use of different binaural beats in a long period of time can help patients to induce a relaxation state (mood) and consequently the perception to pain. The results of this work can be helpful for developing a pain management system with several configurable situations (VR scene, Colour & Sound combination, etc.). In this pilot study we apply 8 types of binaural sounds in a standard common VR scenario and we propose the end users to select the experimented feeling they felt in any case.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality is defined as the implementation of a virtual world that the user perceives as the real one. This can lead having the physical feeling of teleportation into another environment, forgetting the real world and even the physical body. This sensation of immersion affects the stimulus (visual, acoustic and haptic) perceived by the user and it is able to modify the brainwaves power. We think that this can be profitable for pain relief, as the patient feels many synchronized stimulus and he/she needs to be concentrated to process all the information and attenuate the pain sensation or change the initial mood. For that reason, this work proposes a pilot study of a VR environment combined with binaural beats, colors and movements to evaluate the perception the user has. It is believed that the use of different binaural beats in a long period of time can help patients to induce a relaxation state (mood) and consequently the perception to pain. The results of this work can be helpful for developing a pain management system with several configurable situations (VR scene, Colour & Sound combination, etc.). In this pilot study we apply 8 types of binaural sounds in a standard common VR scenario and we propose the end users to select the experimented feeling they felt in any case.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality is defined as the implementation of a virtual world that the user perceives as the real one. This can lead having the physical feeling of teleportation into another environment, forgetting the real world and even the physical body. This sensation of immersion affects the stimulus (visual, acoustic and haptic) perceived by the user and it is able to modify the brainwaves power. We think that this can be profitable for pain relief, as the patient feels many synchronized stimulus and he/she needs to be concentrated to process all the information and attenuate the pain sensation or change the initial mood. For that reason, this work proposes a pilot study of a VR environment combined with binaural beats, colors and movements to evaluate the perception the user has. It is believed that the use of different binaural beats in a long period of time can help patients to induce a relaxation state (mood) and consequently the perception to pain. The results of this work can be helpful for developing a pain management system with several configurable situations (VR scene, Colour & Sound combination, etc.). In this pilot study we apply 8 types of binaural sounds in a standard common VR scenario and we propose the end users to select the experimented feeling they felt in any case.",
"fno": "720200a442",
"keywords": [
"Acoustic Signal Processing",
"Medical Signal Processing",
"Virtual Reality",
"Mood Management",
"Virtual World",
"Brainwaves Power",
"Pain Relief",
"Synchronized Stimulus",
"Pain Sensation",
"VR Environment",
"Relaxation State",
"Pain Management System",
"VR Scene",
"Binaural Sounds",
"Binaural Beats",
"Virtual Reality",
"Color",
"Pain",
"Visualization",
"Mood",
"Ear",
"Virtual Reality",
"Electroencephalography",
"Virtual Reality",
"Brainwaves",
"Binaural Beats",
"Visual And Sound Stimulus",
"Unity"
],
"authors": [
{
"affiliation": null,
"fullName": "Francisco J. Perales",
"givenName": "Francisco J.",
"surname": "Perales",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Miguel Sanchez",
"givenName": "Miguel",
"surname": "Sanchez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Laia Riera",
"givenName": "Laia",
"surname": "Riera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Silvia Ramis",
"givenName": "Silvia",
"surname": "Ramis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "442-447",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7202-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "720200a438",
"articleId": "17D45WaTklm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "720200a448",
"articleId": "17D45XeKgvF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/4/01326773",
"title": "Head-tracking and subject positioning using binaural headset microphones and common modulation anchor sources",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326773/12OmNCbCs1F",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/4",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a020",
"title": "A Study on the Effects of Binaural Listening Materials on Second Language Listening Comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a020/17D45WB0qcn",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577121",
"title": "Importance of binaural cues of depth in low-resolution audio-visual 3D scene reproductions",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577121/17D45Xtvpaj",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500c151",
"title": "Beyond Mono to Binaural: Generating Binaural Audio from Mono Audio with Depth and Cross Modal Attention",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500c151/1B13eQsIoQE",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09726801",
"title": "Binaural SoundNet: Predicting Semantics, Depth and Motion With Binaural Sounds",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09726801/1BrwkoWzJEk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a222",
"title": "Conceptual Design of Emotional and Pain Expressions of a Virtual Patient in a Virtual Reality Training for Paramedics",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a222/1CJcFRRMLV6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a780",
"title": "Virtual Reality-Based Distraction on Pain and Performance during and after Moderate-Vigorous Intensity Cycling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a780/1CJdFouLQ08",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09954178",
"title": "DeepEar: Sound Localization With Binaural Microphones",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09954178/1InotKTCOis",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797681",
"title": "VR and Volitional Pain: Testing Immersive Interventions During a Tattoo",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797681/1cJ1hLgyRKU",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090611",
"title": "Pain Experience in Social VR: The Competing Effect on Objective Pain Tolerance and Subjective Pain Perception",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090611/1jIxokdBogo",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1TNjWjtK",
"doi": "10.1109/ISMAR-Adjunct.2018.00098",
"title": "The Trouble with Augmented Reality/Virtual Reality Authoring Tools",
"normalizedTitle": "The Trouble with Augmented Reality/Virtual Reality Authoring Tools",
"abstract": "There are many technical and design challenges in creating new, usable and useful AR/VR applications. In particular, non-technical designers and end-users are facing a lack of tools to quickly and easily prototype and test new AR/VR user experiences. We review and classify existing AR/VR authoring tools and characterize three primary issues with these tools based on our review and a case study. To address the issues, we discuss two new tools we designed with support for rapid prototyping of new AR/VR content and gesture-based interactions geared towards designers without technical knowledge in gesture recognition, 3D modeling, and programming.",
"abstracts": [
{
"abstractType": "Regular",
"content": "There are many technical and design challenges in creating new, usable and useful AR/VR applications. In particular, non-technical designers and end-users are facing a lack of tools to quickly and easily prototype and test new AR/VR user experiences. We review and classify existing AR/VR authoring tools and characterize three primary issues with these tools based on our review and a case study. To address the issues, we discuss two new tools we designed with support for rapid prototyping of new AR/VR content and gesture-based interactions geared towards designers without technical knowledge in gesture recognition, 3D modeling, and programming.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "There are many technical and design challenges in creating new, usable and useful AR/VR applications. In particular, non-technical designers and end-users are facing a lack of tools to quickly and easily prototype and test new AR/VR user experiences. We review and classify existing AR/VR authoring tools and characterize three primary issues with these tools based on our review and a case study. To address the issues, we discuss two new tools we designed with support for rapid prototyping of new AR/VR content and gesture-based interactions geared towards designers without technical knowledge in gesture recognition, 3D modeling, and programming.",
"fno": "08699236",
"keywords": [
"Augmented Reality",
"Authoring Systems",
"Gesture Recognition",
"Human Computer Interaction",
"Software Prototyping",
"User Experience",
"Rapid Prototyping",
"Gesture Based Interactions",
"Virtual Reality Authoring Tools",
"Augmented Reality Authoring Tools",
"VR Authoring Tools",
"AR Authoring Tools",
"VR User Experiences",
"AR User Experiences",
"Tools",
"Three Dimensional Displays",
"Solid Modeling",
"Animation",
"Programming",
"Authoring Systems",
"Tracking",
"Augmented Reality",
"Virtual Reality",
"Authoring",
"Design",
"Rapid Prototyping",
"3 D Modeling",
"Gestures",
"Wizard Of Oz",
"Human Centered Computing",
"Interaction Paradigms",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "University of Michigan School of Information",
"fullName": "Michael Nebeling",
"givenName": "Michael",
"surname": "Nebeling",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Michigan School of Information",
"fullName": "Maximilian Speicher",
"givenName": "Maximilian",
"surname": "Speicher",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "333-337",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699188",
"articleId": "19F1OzSANdS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699303",
"articleId": "19F1QvQo2wo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a164",
"title": "[POSTER] Authoring Tools in Augmented Reality: An Analysis and Classification of Content Design Tools",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a164/12OmNBLdKJC",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2015/8454/0/07344162",
"title": "From reality to augmented reality: Rapid strategies for developing marker-based AR content using image capturing and authoring tools",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344162/12OmNx8wTjN",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2002/1781/0/17810237",
"title": "A Pragmatic Approach to Augmented Reality Authoring",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810237/12OmNxV4iuj",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/08/08611113",
"title": "MARVisT: Authoring Glyph-Based Visualization in Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/08/08611113/17D45Wuc367",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a935",
"title": "Immersive Animation Authoring in Industrial VR Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a935/1J7Wg05jJeM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798148",
"title": "CAVE-AR: A VR Authoring System to Interactively Design, Simulate, and Debug Multi-user AR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798148/1cJ0FRS6rjG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797973",
"title": "A Comparison of Desktop and Augmented Reality Scenario Based Training Authoring Tools",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797973/1cJ0S2MS49O",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a144",
"title": "Hand ControlAR: An Augmented Reality Application for Learning 3D Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a144/1gysoyOrm2A",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a044",
"title": "A Novel Tool for Immersive Authoring of Experiential Learning in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a044/1tnWQy5llCg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a633",
"title": "Immersive Authoring of Virtual Reality Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a633/1tnXNG6t1x6",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxyiybRDO",
"doi": "10.1109/VRW50115.2020.00038",
"title": "Authoring-by-Doing: Animating Work Instructions for Industrial Virtual Reality Learning Environments",
"normalizedTitle": "Authoring-by-Doing: Animating Work Instructions for Industrial Virtual Reality Learning Environments",
"abstract": "With the rise of Virtual Reality, gaming is about to reach a new level of realism. The industry sector has recognized the technology’s potential too, especially for learning and training of assembly procedures and maintenance tasks. Although gaming and manufacturing industry seem to have different requirements at the first sight, both worlds can benefit from each other. One of the most cumbersome tasks when creating a VR application is the authoring of content, more specific the animation of characters and interactable objects. This paper describes the prototype for a Virtual Reality-supported learning and training application and presents a concept to simplify the authoring process of content with additional focus on animating assembly procedures. Our idea is to record actions performed by an expert in the VR environment. Trainees can watch the recorded actions as semi-transparent “ghost” animation and interactively mimic the set of instructions. By this “authoring-by-doing” approach, we hope to accelerate content creation for industrial VR-learning scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the rise of Virtual Reality, gaming is about to reach a new level of realism. The industry sector has recognized the technology’s potential too, especially for learning and training of assembly procedures and maintenance tasks. Although gaming and manufacturing industry seem to have different requirements at the first sight, both worlds can benefit from each other. One of the most cumbersome tasks when creating a VR application is the authoring of content, more specific the animation of characters and interactable objects. This paper describes the prototype for a Virtual Reality-supported learning and training application and presents a concept to simplify the authoring process of content with additional focus on animating assembly procedures. Our idea is to record actions performed by an expert in the VR environment. Trainees can watch the recorded actions as semi-transparent “ghost” animation and interactively mimic the set of instructions. By this “authoring-by-doing” approach, we hope to accelerate content creation for industrial VR-learning scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the rise of Virtual Reality, gaming is about to reach a new level of realism. The industry sector has recognized the technology’s potential too, especially for learning and training of assembly procedures and maintenance tasks. Although gaming and manufacturing industry seem to have different requirements at the first sight, both worlds can benefit from each other. One of the most cumbersome tasks when creating a VR application is the authoring of content, more specific the animation of characters and interactable objects. This paper describes the prototype for a Virtual Reality-supported learning and training application and presents a concept to simplify the authoring process of content with additional focus on animating assembly procedures. Our idea is to record actions performed by an expert in the VR environment. Trainees can watch the recorded actions as semi-transparent “ghost” animation and interactively mimic the set of instructions. By this “authoring-by-doing” approach, we hope to accelerate content creation for industrial VR-learning scenarios.",
"fno": "09090665",
"keywords": [
"Animation",
"Training",
"Tools",
"Three Dimensional Displays",
"Games",
"Task Analysis",
"Prototypes",
"Virtual Reality",
"Animation",
"Industrial Training",
"Gaming",
"Education",
"Industrial Training",
"Electronic Design Automation And Methodology",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Center of Excellence for Smart Production, University of Applied Sciences Upper Austria,School of Business and Management,Wehrgrabengasse 1-3,Steyr,Austria,4400",
"fullName": "Josef Wolfartsberger",
"givenName": "Josef",
"surname": "Wolfartsberger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center of Excellence for Smart Production, University of Applied Sciences Upper Austria,School of Business and Management,Wehrgrabengasse 1-3,Steyr,Austria,4400",
"fullName": "Daniel Niedermayr",
"givenName": "Daniel",
"surname": "Niedermayr",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "173-176",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090656",
"articleId": "1jIxzMPf8g8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090483",
"articleId": "1jIxtye9aEg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgi/2005/9330/0/01500374",
"title": "Toward gesture-based behavior authoring",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2005/01500374/12OmNBscCXp",
"parentPublication": {
"id": "proceedings/cgi/2005/9330/0",
"title": "Computer Graphics International 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446550",
"title": "AnimationVR - Interactive Controller-Based Animating in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446550/13bd1fph1xN",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446332",
"title": "AnimationVR - Interactive Controller-Based Animating in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446332/13bd1ftOBCY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699236",
"title": "The Trouble with Augmented Reality/Virtual Reality Authoring Tools",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699236/19F1TNjWjtK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714052",
"title": "PoVRPoint: Authoring Presentations in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714052/1B0Y1Tyx2PC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a935",
"title": "Immersive Animation Authoring in Industrial VR Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a935/1J7Wg05jJeM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a064",
"title": "Smart Motion Trails for Animating in VR",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a064/1KmFbVCEHxm",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798181",
"title": "VR as a Content Creation Tool for Movie Previsualisation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798181/1cJ0KPnDSV2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090677",
"title": "PoseMMR: A Collaborative Mixed Reality Authoring Tool for Character Animation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090677/1jIxxM75R4I",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a174",
"title": "An AR Work Instructions Authoring Tool for Human-Operated Industrial Assembly Lines",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a174/1qpzDvRJytG",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxz87dXEs",
"doi": "10.1109/VRW50115.2020.00075",
"title": "It Is Complicated: Interacting with Children in Social Virtual Reality",
"normalizedTitle": "It Is Complicated: Interacting with Children in Social Virtual Reality",
"abstract": "Social VR refers to 3D virtual spaces where multiple users can interact with one another through VR head-mounted displays. These novel digital spaces are dramatically transforming how people meet, interact, and socialize online and have attracted users of different age groups and maturity levels. This variety sometimes leads to less desirable interactions, tensions, and frustrations between different user bases (e.g., adults and children). Based on 30 interviews, we focus on how people perceive and experience interacting with young users across various social VR applications. We aim at better understanding the complex social interaction dynamics afforded by social VR. We also discuss potential design implications toward a more child centered design for future social VR platforms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Social VR refers to 3D virtual spaces where multiple users can interact with one another through VR head-mounted displays. These novel digital spaces are dramatically transforming how people meet, interact, and socialize online and have attracted users of different age groups and maturity levels. This variety sometimes leads to less desirable interactions, tensions, and frustrations between different user bases (e.g., adults and children). Based on 30 interviews, we focus on how people perceive and experience interacting with young users across various social VR applications. We aim at better understanding the complex social interaction dynamics afforded by social VR. We also discuss potential design implications toward a more child centered design for future social VR platforms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Social VR refers to 3D virtual spaces where multiple users can interact with one another through VR head-mounted displays. These novel digital spaces are dramatically transforming how people meet, interact, and socialize online and have attracted users of different age groups and maturity levels. This variety sometimes leads to less desirable interactions, tensions, and frustrations between different user bases (e.g., adults and children). Based on 30 interviews, we focus on how people perceive and experience interacting with young users across various social VR applications. We aim at better understanding the complex social interaction dynamics afforded by social VR. We also discuss potential design implications toward a more child centered design for future social VR platforms.",
"fno": "09090445",
"keywords": [
"Games",
"Pediatrics",
"Interviews",
"Conferences",
"Three Dimensional Displays",
"Avatars",
"Human Centered Computing",
"Social VR",
"Children"
],
"authors": [
{
"affiliation": "Clemson University",
"fullName": "Divine Maloney",
"givenName": "Divine",
"surname": "Maloney",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Guo Freeman",
"givenName": "Guo",
"surname": "Freeman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Andrew Robb",
"givenName": "Andrew",
"surname": "Robb",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "343-347",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090462",
"articleId": "1jIxwYCmrRe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090528",
"articleId": "1jIxxI8I2aI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892258",
"title": "All are welcome: Using VR ethnography to explore harassment behavior in immersive social virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892258/12OmNx0A7Fw",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dcve/2016/2138/0/07563561",
"title": "Bringing real objects, spaces, actions, and interactions into social VR",
"doi": null,
"abstractUrl": "/proceedings-article/3dcve/2016/07563561/12OmNxGj9Qx",
"parentPublication": {
"id": "proceedings/3dcve/2016/2138/0",
"title": "2016 IEEE Third VR International Workshop on Collaborative Virtual Environments (3DCVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iset/2017/3031/0/08005402",
"title": "The Application of Traditional Games to Develop Social and Gross Motor Skills in 6-7 Year-old Children",
"doi": null,
"abstractUrl": "/proceedings-article/iset/2017/08005402/12OmNzwZ6nK",
"parentPublication": {
"id": "proceedings/iset/2017/3031/0",
"title": "2017 International Symposium on Educational Technology (ISET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/spw/2018/8276/0/634901a186",
"title": "Forensic Analysis of Immersive Virtual Reality Social Applications: A Primary Account",
"doi": null,
"abstractUrl": "/proceedings-article/spw/2018/634901a186/12UTFDIoiEE",
"parentPublication": {
"id": "proceedings/spw/2018/8276/0",
"title": "2018 IEEE Security and Privacy Workshops (SPW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a742",
"title": "Social Presence in VR Empathy Game for Children: Empathic Interaction with the Virtual Characters",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a742/1CJfetqDtnO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798100",
"title": "Towards a Framework on Accessible and Social VR in Education",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798100/1cJ16Rutlm0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798165",
"title": "Harassment in Social VR: Implications for Design",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798165/1cJ1eZOh6wM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090683",
"title": "The Effects of Avatar Visibility on Behavioral Response with or without Mirror-Visual Feedback in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090683/1jIxzZ4gw4E",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a060",
"title": "Photorealistic avatars to enhance the efficacy of Selfattachment psychotherapy",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a060/1qpzCwDcDKM",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a271",
"title": "Social Virtual Reality: Ethical Considerations and Future Directions for An Emerging Research Space",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a271/1tnXmA2qUlW",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysxojAVAk",
"doi": "10.1109/ISMAR50242.2020.00063",
"title": "Pen-based Interaction with Spreadsheets in Mobile Virtual Reality",
"normalizedTitle": "Pen-based Interaction with Spreadsheets in Mobile Virtual Reality",
"abstract": "Virtual Reality (VR) can enhance the display and interaction of mobile knowledge work and in particular, spreadsheet applications. While spreadsheets are widely used yet are challenging to interact with, especially on mobile devices, using them in VR has not been explored in depth. A special uniqueness of the domain is the contrast between the immersive and large display space afforded by VR, contrasted by the very limited interaction space that may be afforded for the information worker on the go, such as an airplane seat or a small work-space. To close this gap, we present a tool-set for enhancing spreadsheet interaction on tablets using immersive VR headsets and pen-based input. This combination opens up many possibilities for enhancing the productivity for spreadsheet interaction. We propose to use the space around and in front of the tablet for enhanced visualization of spreadsheet data and meta-data. For example, extending sheet display beyond the bounds of the physical screen, or easier debugging by uncovering hidden dependencies between sheet's cells. Combining the precise on-screen input of a pen with spatial sensing around the tablet, we propose tools for the efficient creation and editing of spreadsheets functions such as off-the-screen layered menus, visualization of sheets dependencies, and gaze-and-touch-based switching between spreadsheet tabs. We study the feasibility of the proposed tool-set using a video-based online survey and an expert-based assessment of indicative human performance potential.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) can enhance the display and interaction of mobile knowledge work and in particular, spreadsheet applications. While spreadsheets are widely used yet are challenging to interact with, especially on mobile devices, using them in VR has not been explored in depth. A special uniqueness of the domain is the contrast between the immersive and large display space afforded by VR, contrasted by the very limited interaction space that may be afforded for the information worker on the go, such as an airplane seat or a small work-space. To close this gap, we present a tool-set for enhancing spreadsheet interaction on tablets using immersive VR headsets and pen-based input. This combination opens up many possibilities for enhancing the productivity for spreadsheet interaction. We propose to use the space around and in front of the tablet for enhanced visualization of spreadsheet data and meta-data. For example, extending sheet display beyond the bounds of the physical screen, or easier debugging by uncovering hidden dependencies between sheet's cells. Combining the precise on-screen input of a pen with spatial sensing around the tablet, we propose tools for the efficient creation and editing of spreadsheets functions such as off-the-screen layered menus, visualization of sheets dependencies, and gaze-and-touch-based switching between spreadsheet tabs. We study the feasibility of the proposed tool-set using a video-based online survey and an expert-based assessment of indicative human performance potential.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) can enhance the display and interaction of mobile knowledge work and in particular, spreadsheet applications. While spreadsheets are widely used yet are challenging to interact with, especially on mobile devices, using them in VR has not been explored in depth. A special uniqueness of the domain is the contrast between the immersive and large display space afforded by VR, contrasted by the very limited interaction space that may be afforded for the information worker on the go, such as an airplane seat or a small work-space. To close this gap, we present a tool-set for enhancing spreadsheet interaction on tablets using immersive VR headsets and pen-based input. This combination opens up many possibilities for enhancing the productivity for spreadsheet interaction. We propose to use the space around and in front of the tablet for enhanced visualization of spreadsheet data and meta-data. For example, extending sheet display beyond the bounds of the physical screen, or easier debugging by uncovering hidden dependencies between sheet's cells. Combining the precise on-screen input of a pen with spatial sensing around the tablet, we propose tools for the efficient creation and editing of spreadsheets functions such as off-the-screen layered menus, visualization of sheets dependencies, and gaze-and-touch-based switching between spreadsheet tabs. We study the feasibility of the proposed tool-set using a video-based online survey and an expert-based assessment of indicative human performance potential.",
"fno": "850800a361",
"keywords": [
"Data Visualisation",
"Helmet Mounted Displays",
"Mobile Computing",
"Spreadsheet Programs",
"User Interfaces",
"Virtual Reality",
"Immersive VR Headsets",
"Spreadsheet Interaction",
"Enhanced Visualization",
"Spreadsheet Data",
"Sheet Display",
"Spreadsheets Functions",
"Spreadsheet Tabs",
"Tool Set",
"Video Based",
"Expert Based Assessment",
"Pen Based Interaction",
"Mobile Virtual Reality",
"Mobile Knowledge Work",
"Spreadsheet Applications",
"Mobile Devices",
"Immersive Display Space",
"Large Display Space",
"Interaction Space",
"Work Space",
"Gaze And Touch Based Switching",
"Productivity",
"Headphones",
"Switches",
"Debugging",
"Tools",
"Mobile Handsets",
"Sensors"
],
"authors": [
{
"affiliation": "Coburg University of Applied Sciences and Arts",
"fullName": "Travis Gesslein",
"givenName": "Travis",
"surname": "Gesslein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts",
"fullName": "Verena Biener",
"givenName": "Verena",
"surname": "Biener",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts",
"fullName": "Philipp Gagel",
"givenName": "Philipp",
"surname": "Gagel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts",
"fullName": "Daniel Schneider",
"givenName": "Daniel",
"surname": "Schneider",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Cambridge",
"fullName": "Per Ola Kristensson",
"givenName": "Per Ola",
"surname": "Kristensson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research",
"fullName": "Eyal Ofek",
"givenName": "Eyal",
"surname": "Ofek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research",
"fullName": "Michel Pahud",
"givenName": "Michel",
"surname": "Pahud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts",
"fullName": "Jens Grubert",
"givenName": "Jens",
"surname": "Grubert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "361-373",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a350",
"articleId": "1pysyvL4CwU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a374",
"articleId": "1pysuXX1aBq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vlhcc/2016/0252/0/07739690",
"title": "Polaris: Providing context aware navigation in spreadsheets",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2016/07739690/12OmNrAMF5b",
"parentPublication": {
"id": "proceedings/vlhcc/2016/0252/0",
"title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2014/4035/0/06883059",
"title": "A domain terms visualization tool for spreadsheets",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2014/06883059/12OmNwEJ0TX",
"parentPublication": {
"id": "proceedings/vlhcc/2014/4035/0",
"title": "2014 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2012/5048/0/06405435",
"title": "Debugging Spreadsheets: A CSP-based Approach",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2012/06405435/12OmNy7h35k",
"parentPublication": {
"id": "proceedings/issrew/2012/5048/0",
"title": "2012 IEEE 23rd International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2013/0369/0/06645237",
"title": "An empirical study of spreadsheet authors' mental models in explaining and debugging tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2013/06645237/12OmNyoAAat",
"parentPublication": {
"id": "proceedings/vlhcc/2013/0369/0",
"title": "2013 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2018/3346/0/3346a139",
"title": "Table Recognition in Spreadsheets via a Graph Representation",
"doi": null,
"abstractUrl": "/proceedings-article/das/2018/3346a139/12OmNyoSbg5",
"parentPublication": {
"id": "proceedings/das/2018/3346/0",
"title": "2018 13th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2015/7526/0/07332439",
"title": "XVIZIT: Visualizing cognitive units in spreadsheets",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2015/07332439/12OmNyz5K1q",
"parentPublication": {
"id": "proceedings/vissoft/2015/7526/0",
"title": "2015 IEEE 3rd Working Conference on Software Visualization (VISSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2009/05/mso2009050025",
"title": "Software Engineering for Spreadsheets",
"doi": null,
"abstractUrl": "/magazine/so/2009/05/mso2009050025/13rRUxZRbmc",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714052",
"title": "PoVRPoint: Authoring Presentations in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714052/1B0Y1Tyx2PC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2018/1970/0/197000a705",
"title": "Automated Repair of Data Faults in Templated Spreadsheets",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2018/197000a705/1b66pFpXBn2",
"parentPublication": {
"id": "proceedings/apsec/2018/1970/0",
"title": "2018 25th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a761",
"title": "Demonstrating the Use of Rapid Touch Interaction in Virtual Reality for Prolonged Interaction in Productivity Scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a761/1tnX9xsCTVC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXDtGbdN6",
"doi": "10.1109/VRW52623.2021.00223",
"title": "Virtual Demonstrator for Spatial Presentations",
"normalizedTitle": "Virtual Demonstrator for Spatial Presentations",
"abstract": "To overcome the challenges of instructing on spatial concepts remotely, we propose Virtual Demonstrator, a virtual reality (VR) presentation software for educators. Virtual Demonstrator provides a suite of tools for creating spatial presentations in VR, analogous to 2D slide-based presentation software. Within our software, Visual Elements are combined across a spatiotemporal space, with discrete states akin to presentation slides. This educational resource is designed to address a growing need to learn without physical presence, and to leverage 3D spaces for learning complex spatial concepts. A timeline expands a 3D environment into another dimension, allowing for an expanded design space.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To overcome the challenges of instructing on spatial concepts remotely, we propose Virtual Demonstrator, a virtual reality (VR) presentation software for educators. Virtual Demonstrator provides a suite of tools for creating spatial presentations in VR, analogous to 2D slide-based presentation software. Within our software, Visual Elements are combined across a spatiotemporal space, with discrete states akin to presentation slides. This educational resource is designed to address a growing need to learn without physical presence, and to leverage 3D spaces for learning complex spatial concepts. A timeline expands a 3D environment into another dimension, allowing for an expanded design space.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To overcome the challenges of instructing on spatial concepts remotely, we propose Virtual Demonstrator, a virtual reality (VR) presentation software for educators. Virtual Demonstrator provides a suite of tools for creating spatial presentations in VR, analogous to 2D slide-based presentation software. Within our software, Visual Elements are combined across a spatiotemporal space, with discrete states akin to presentation slides. This educational resource is designed to address a growing need to learn without physical presence, and to leverage 3D spaces for learning complex spatial concepts. A timeline expands a 3D environment into another dimension, allowing for an expanded design space.",
"fno": "405700a681",
"keywords": [
"Computer Aided Instruction",
"Virtual Reality",
"Spatial Presentations",
"2 D Slide Based Presentation Software",
"Presentation Slides",
"Complex Spatial Concepts",
"Virtual Demonstrator",
"3 D Environment",
"VR",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"User Interfaces",
"Tools",
"Software",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Applied Computing",
"Education",
"Interactive Learning Environments"
],
"authors": [
{
"affiliation": "University of Minnesota",
"fullName": "Maxwell Omdal",
"givenName": "Maxwell",
"surname": "Omdal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Minnesota",
"fullName": "David Kinney",
"givenName": "David",
"surname": "Kinney",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Minnesota",
"fullName": "Kiet Tran",
"givenName": "Kiet",
"surname": "Tran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Minnesota",
"fullName": "Evan Suma Rosenberg",
"givenName": "Evan Suma",
"surname": "Rosenberg",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "681-682",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnXC2zynbq",
"name": "pvrw202140570-09419278s1-mm_405700a681.zip",
"size": "267 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419278s1-mm_405700a681.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a679",
"articleId": "1tnXEGmXdNC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a683",
"articleId": "1tnWA7Z0Xpm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrw/2022/8402/0/840200a056",
"title": "Physics-based character animation for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a056/1CJdEcF4PjG",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a283",
"title": "TeachInVR: A virtual reality classroom for remote education",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a283/1CJerjEZuve",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a299",
"title": "Visualized Cues for Enhancing Spatial Ability Training in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a299/1CJfbuK0Yfe",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2022/5537/0/553700a507",
"title": "Virtual Reality for Software Engineering Presentations",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2022/553700a507/1KOveXxWzDy",
"parentPublication": {
"id": "proceedings/apsec/2022/5537/0",
"title": "2022 29th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl-hcc/2020/6901/0/09127272",
"title": "Impact of Spatial Interface Traversal on Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vl-hcc/2020/09127272/1lvPZi9tk2c",
"parentPublication": {
"id": "proceedings/vl-hcc/2020/6901/0",
"title": "2020 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a751",
"title": "How rad(-ical) is VRAD (Virtual Reality-Aided Design)?",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a751/1rSRbQTQHYY",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a546",
"title": "HMD Type and Spatial Ability: Effects on the Experiences and Learning of Students in Immersive Virtual Field Trips",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a546/1tnWWhoj1ba",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a588",
"title": "Multi-modal Spatial Object Localization in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a588/1tuAGAPl3Tq",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a582",
"title": "Head Up Visualization of Spatial Sound Sources in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a582/1tuAPlsZnMc",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09507320",
"title": "OctoPocus in VR: Using a Dynamic Guide for 3D Mid-Air Gestures in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09507320/1vNfMheqZ2w",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXoZM7Cw0",
"doi": "10.1109/VRW52623.2021.00048",
"title": "Comparing Virtual Constraints and a Physical Stylus for Planar Writing and Drawing in Virtual Reality",
"normalizedTitle": "Comparing Virtual Constraints and a Physical Stylus for Planar Writing and Drawing in Virtual Reality",
"abstract": "Air-drawing, or drawing without the use of a physical surface, is the dominant interaction metaphor for drawing or writing in Virtual Reality (VR). However, we typically use devices that are restricted to two-dimensional planes:mice, pen-and-paper, or the digital equivalent pen-and-tablet devices. We present results from a user study examining differences in performance, user-preference, and handwriting between two implementations of air drawing and passive haptic surface drawing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Air-drawing, or drawing without the use of a physical surface, is the dominant interaction metaphor for drawing or writing in Virtual Reality (VR). However, we typically use devices that are restricted to two-dimensional planes:mice, pen-and-paper, or the digital equivalent pen-and-tablet devices. We present results from a user study examining differences in performance, user-preference, and handwriting between two implementations of air drawing and passive haptic surface drawing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Air-drawing, or drawing without the use of a physical surface, is the dominant interaction metaphor for drawing or writing in Virtual Reality (VR). However, we typically use devices that are restricted to two-dimensional planes:mice, pen-and-paper, or the digital equivalent pen-and-tablet devices. We present results from a user study examining differences in performance, user-preference, and handwriting between two implementations of air drawing and passive haptic surface drawing.",
"fno": "405700a220",
"keywords": [
"Computer Displays",
"Haptic Interfaces",
"Virtual Reality",
"Pen And Paper",
"Air Drawing",
"Passive Haptic Surface Drawing",
"Virtual Constraints",
"Physical Stylus",
"Planar Writing",
"Virtual Reality",
"Air Drawing",
"Physical Surface",
"Dominant Interaction Metaphor",
"Pen And Tablet Devices",
"User Preference",
"Performance Evaluation",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"Writing",
"User Interfaces",
"Haptic Interfaces",
"Human Centered Computing",
"Empirical Studies In HCI",
"Graphics Input Devices",
"Computing Methodologies",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Georgia,Department of Computer Science",
"fullName": "Brook Bowers",
"givenName": "Brook",
"surname": "Bowers",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Georgia,College of Engineering",
"fullName": "AJ Tuttle",
"givenName": "AJ",
"surname": "Tuttle",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Georgia,College of Engineering",
"fullName": "Andrew Rukangu",
"givenName": "Andrew",
"surname": "Rukangu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Georgia,College of Engineering",
"fullName": "Anton Franzluebbers",
"givenName": "Anton",
"surname": "Franzluebbers",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Georgia,College of Engineering",
"fullName": "Catherine Ball",
"givenName": "Catherine",
"surname": "Ball",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Georgia,College of Engineering",
"fullName": "Kyle Johnsen",
"givenName": "Kyle",
"surname": "Johnsen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "220-225",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a214",
"articleId": "1tnXoU9ycMM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a226",
"articleId": "1tnXniCSk8g",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/case/2007/1153/0/04341738",
"title": "Robot Drawing Techniques for Contoured Surface Using an Automated Sketching Platform",
"doi": null,
"abstractUrl": "/proceedings-article/case/2007/04341738/12OmNBOCWfO",
"parentPublication": {
"id": "proceedings/case/2007/1153/0",
"title": "3rd Annual IEEE Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/04/07547957",
"title": "Haptic Feedback Manipulation During Botulinum Toxin Injection Therapy for Focal Hand Dystonia Patients: A Possible New Assistive Strategy",
"doi": null,
"abstractUrl": "/journal/th/2016/04/07547957/13rRUwgQpqS",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/05/v1067",
"title": "Drawing on Air: Input Techniques for Controlled 3D Line Illustration",
"doi": null,
"abstractUrl": "/journal/tg/2007/05/v1067/13rRUwhpBO1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2014/03/mcg2014030022",
"title": "The IrPen: A 6-DOF Pen for Interaction with Tablet Computers",
"doi": null,
"abstractUrl": "/magazine/cg/2014/03/mcg2014030022/13rRUxOvecj",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699311",
"title": "Extended Workspace Using a Smartphone with a Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699311/19F1NSFj2sU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a974",
"title": "3DColAR: Exploring 3D Color Selection and Surface Painting for Head Worn Augmented Reality using Hand Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a974/1CJcAGzhwxq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2019/4050/0/08809589",
"title": "Passive Haptic Menus for Desk-Based and HMD-Projected Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2019/08809589/1cI61Rx4b9m",
"parentPublication": {
"id": "proceedings/wevr/2019/4050/0",
"title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090532",
"title": "OVR Stylus: Designing Pen-Based 3D Input Devices for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090532/1jIxu0R4UZG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a749",
"title": "Demonstrating High-Precision and High-Fidelity Digital Inking for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a749/1tnXvUPX7yg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a306",
"title": "Flashpen: A High-Fidelity and High-Precision Multi-Surface Pen for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a306/1tuB16k4ef6",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXvUPX7yg",
"doi": "10.1109/VRW52623.2021.00257",
"title": "Demonstrating High-Precision and High-Fidelity Digital Inking for Virtual Reality",
"normalizedTitle": "Demonstrating High-Precision and High-Fidelity Digital Inking for Virtual Reality",
"abstract": "Pen computing has become popular with tablet and wall-screen computers for digital precision tasks such as writing, annotating, and drawing. Digital pens have been made possible by the developments in input sensing technologies integrated into such screens. Virtual Reality systems, however, largely detect input using cameras, whose update rates are insufficient for capturing pen input with the necessary fidelity. In this demonstration, we showcase a digital pen for VR that accurately digitizes writing and drawing, including small and quick turns. Our prototype Flashpen repurposes an optical flow sensor from gaming mice, which digitizes minute motions at over 8 kHz when dragged across a surface. We demonstrate several use-cases for Flashpen during interaction in VR, including sketching, selecting, annotating, and writing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Pen computing has become popular with tablet and wall-screen computers for digital precision tasks such as writing, annotating, and drawing. Digital pens have been made possible by the developments in input sensing technologies integrated into such screens. Virtual Reality systems, however, largely detect input using cameras, whose update rates are insufficient for capturing pen input with the necessary fidelity. In this demonstration, we showcase a digital pen for VR that accurately digitizes writing and drawing, including small and quick turns. Our prototype Flashpen repurposes an optical flow sensor from gaming mice, which digitizes minute motions at over 8 kHz when dragged across a surface. We demonstrate several use-cases for Flashpen during interaction in VR, including sketching, selecting, annotating, and writing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Pen computing has become popular with tablet and wall-screen computers for digital precision tasks such as writing, annotating, and drawing. Digital pens have been made possible by the developments in input sensing technologies integrated into such screens. Virtual Reality systems, however, largely detect input using cameras, whose update rates are insufficient for capturing pen input with the necessary fidelity. In this demonstration, we showcase a digital pen for VR that accurately digitizes writing and drawing, including small and quick turns. Our prototype Flashpen repurposes an optical flow sensor from gaming mice, which digitizes minute motions at over 8 kHz when dragged across a surface. We demonstrate several use-cases for Flashpen during interaction in VR, including sketching, selecting, annotating, and writing.",
"fno": "405700a749",
"keywords": [
"Computer Graphics",
"Flow Sensors",
"Human Computer Interaction",
"Image Sequences",
"Virtual Reality",
"Pen Computing",
"Wall Screen Computers",
"Digital Precision Tasks",
"Digital Pen",
"Input Sensing Technologies",
"Virtual Reality Systems",
"Pen Input",
"High Precision High Fidelity Digital Inking",
"Flashpen",
"Optical Flow Sensor",
"Computers",
"Three Dimensional Displays",
"Tracking",
"Conferences",
"Prototypes",
"Virtual Reality",
"Writing",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Devices",
"Graphics Input Devices"
],
"authors": [
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Hugo Romat",
"givenName": "Hugo",
"surname": "Romat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Andreas Fender",
"givenName": "Andreas",
"surname": "Fender",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Manuel Meier",
"givenName": "Manuel",
"surname": "Meier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Christian Holz",
"givenName": "Christian",
"surname": "Holz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "749-750",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnXvyezunK",
"name": "pvrw202140570-09419237s1-mm_405700a749.zip",
"size": "59.7 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419237s1-mm_405700a749.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a747",
"articleId": "1tnX546Pe7e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a751",
"articleId": "1tnXwIViMy4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892383",
"title": "Gesture-based augmented reality annotation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892383/12OmNwJPMYX",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476609",
"title": "Poster: The NetEyes Collaborative, Augmented Reality, Digital Paper System",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476609/12OmNxvwoR1",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mobilware/2013/94/0/06775054",
"title": "A Natural Handwriting Algorithm for Tablets",
"doi": null,
"abstractUrl": "/proceedings-article/mobilware/2013/06775054/12OmNyaGeKE",
"parentPublication": {
"id": "proceedings/mobilware/2013/94/0",
"title": "2013 International Conference on MOBILe Wireless MiddleWARE, Operating Systems, and Applications (Mobilware)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446053",
"title": "High-Fidelity Interaction for Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446053/13bd1tl2omt",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/04/07547957",
"title": "Haptic Feedback Manipulation During Botulinum Toxin Injection Therapy for Focal Hand Dystonia Patients: A Possible New Assistive Strategy",
"doi": null,
"abstractUrl": "/journal/th/2016/04/07547957/13rRUwgQpqS",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2018/7308/0/08644615",
"title": "VPad: Virtual Writing Tablet for Laptops Leveraging Acoustic Signals",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2018/08644615/17QjJcpcgyq",
"parentPublication": {
"id": "proceedings/icpads/2018/7308/0",
"title": "2018 IEEE 24th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/2/260702a245",
"title": "A Pen-Grip Shaped Device for Estimating Writing Pressure and Altitude",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260702a245/1cYisrOBFAc",
"parentPublication": {
"id": "compsac/2019/2607/2",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdarw/2019/5054/8/505408a013",
"title": "Online and Offline Data Collection of Japanese Handwriting",
"doi": null,
"abstractUrl": "/proceedings-article/icdarw/2019/505408a013/1eLyfjCbWb6",
"parentPublication": {
"id": "icdarw/2019/5054/8",
"title": "2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a220",
"title": "Comparing Virtual Constraints and a Physical Stylus for Planar Writing and Drawing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a220/1tnXoZM7Cw0",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a306",
"title": "Flashpen: A High-Fidelity and High-Precision Multi-Surface Pen for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a306/1tuB16k4ef6",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuB16k4ef6",
"doi": "10.1109/VR50410.2021.00053",
"title": "Flashpen: A High-Fidelity and High-Precision Multi-Surface Pen for Virtual Reality",
"normalizedTitle": "Flashpen: A High-Fidelity and High-Precision Multi-Surface Pen for Virtual Reality",
"abstract": "Digital pen interaction has become a first-class input modality for precision tasks such as writing, annotating, drawing, and 2D manipulation. The key enablers of digital inking are the capacitive or resistive sensors that are integrated in contemporary tablet devices. In Virtual Reality (VR), however, users typically provide input across large regions, hence limiting the suitability of using additional tablet devices for accurate pen input. In this paper, we present Flashpen, a digital pen for VR whose sensing principle affords accurately digitizing hand writing and intricate drawing, including small and quick turns. Flashpen re-purposes an inexpensive gaming mouse sensor that digitizes extremely fine grained motions in the micrometer range at over 8 kHz when moving on a surface. We combine Flashpen's high-fidelity relative input with the absolute tracking cues from a VR headset to enable pen interaction across a variety of VR applications. In our two-block evaluation, which consists of a tracing task and a writing task, we compare Flashpen to a professional drawing tablet (Wacom). With this, we demonstrate that Flashpen's fidelity matches the performance of state-of-the-art digitizers and approaches the fidelity of analog pens, while adding the flexibility of supporting a wide range of flat surfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Digital pen interaction has become a first-class input modality for precision tasks such as writing, annotating, drawing, and 2D manipulation. The key enablers of digital inking are the capacitive or resistive sensors that are integrated in contemporary tablet devices. In Virtual Reality (VR), however, users typically provide input across large regions, hence limiting the suitability of using additional tablet devices for accurate pen input. In this paper, we present Flashpen, a digital pen for VR whose sensing principle affords accurately digitizing hand writing and intricate drawing, including small and quick turns. Flashpen re-purposes an inexpensive gaming mouse sensor that digitizes extremely fine grained motions in the micrometer range at over 8 kHz when moving on a surface. We combine Flashpen's high-fidelity relative input with the absolute tracking cues from a VR headset to enable pen interaction across a variety of VR applications. In our two-block evaluation, which consists of a tracing task and a writing task, we compare Flashpen to a professional drawing tablet (Wacom). With this, we demonstrate that Flashpen's fidelity matches the performance of state-of-the-art digitizers and approaches the fidelity of analog pens, while adding the flexibility of supporting a wide range of flat surfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Digital pen interaction has become a first-class input modality for precision tasks such as writing, annotating, drawing, and 2D manipulation. The key enablers of digital inking are the capacitive or resistive sensors that are integrated in contemporary tablet devices. In Virtual Reality (VR), however, users typically provide input across large regions, hence limiting the suitability of using additional tablet devices for accurate pen input. In this paper, we present Flashpen, a digital pen for VR whose sensing principle affords accurately digitizing hand writing and intricate drawing, including small and quick turns. Flashpen re-purposes an inexpensive gaming mouse sensor that digitizes extremely fine grained motions in the micrometer range at over 8 kHz when moving on a surface. We combine Flashpen's high-fidelity relative input with the absolute tracking cues from a VR headset to enable pen interaction across a variety of VR applications. In our two-block evaluation, which consists of a tracing task and a writing task, we compare Flashpen to a professional drawing tablet (Wacom). With this, we demonstrate that Flashpen's fidelity matches the performance of state-of-the-art digitizers and approaches the fidelity of analog pens, while adding the flexibility of supporting a wide range of flat surfaces.",
"fno": "255600a306",
"keywords": [
"Interactive Devices",
"Virtual Reality",
"Hand Writing",
"Intricate Drawing",
"VR Headset",
"Tracing Task",
"Professional Drawing Tablet",
"Digitizers",
"Analog Pens",
"Flat Surfaces",
"High Precision Multisurface Pen",
"Virtual Reality",
"Digital Pen Interaction",
"Digital Inking",
"Capacitive Sensors",
"Resistive Sensors",
"Tablet Devices",
"Flashpen",
"High Fidelity Relative Input",
"Gaming Mouse Sensor",
"First Class Input Modality",
"Sensing Principle",
"Micrometer Range",
"Absolute Tracking Cues",
"Two Block Evaluation",
"Wacom",
"Fidelity Matches",
"Surface Reconstruction",
"Three Dimensional Displays",
"Tracking",
"Input Devices",
"Virtual Reality",
"Writing",
"Tools",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Devices",
"Graphics Input Devices"
],
"authors": [
{
"affiliation": "Department of Computer Science,ETH Zürich,Switzerland",
"fullName": "Hugo Romat",
"givenName": "Hugo",
"surname": "Romat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science,ETH Zürich,Switzerland",
"fullName": "Andreas Fender",
"givenName": "Andreas",
"surname": "Fender",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science,ETH Zürich,Switzerland",
"fullName": "Manuel Meier",
"givenName": "Manuel",
"surname": "Meier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science,ETH Zürich,Switzerland",
"fullName": "Christian Holz",
"givenName": "Christian",
"surname": "Holz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "306-315",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tuAZRWgE6I",
"name": "pvr202118380-09417660s1-mm_255600a306.zip",
"size": "237 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417660s1-mm_255600a306.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "255600a296",
"articleId": "1tuAUBCWDio",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a316",
"articleId": "1tuB1QUgKJy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2017/3586/9/3586j009",
"title": "A Case Study of the Relationship between Local Pen Action and Three Dimensional Shapes of Handwritten Strokes",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586j009/12OmNBU1jIY",
"parentPublication": {
"id": "icdar/2017/3586/9",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sutc/2008/3158/0/3158a531",
"title": "Usability Comparison of Pen-Based Input for Young Children on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/sutc/2008/3158a531/12OmNrNh0y2",
"parentPublication": {
"id": "proceedings/sutc/2008/3158/0",
"title": "2008 IEEE International Conference on Sensor Networks, Ubiquitous, and Trustworthy Computing (SUTC '08)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/axmedis/2006/2625/0/26250186",
"title": "Document Retrieval in Pen-Based Media Data",
"doi": null,
"abstractUrl": "/proceedings-article/axmedis/2006/26250186/12OmNy2Jtax",
"parentPublication": {
"id": "proceedings/axmedis/2006/2625/0",
"title": "2006 2nd International Conference on Automated Production of Cross Media",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446053",
"title": "High-Fidelity Interaction for Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446053/13bd1tl2omt",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049645",
"title": "GestureSurface: VR Sketching through Assembling Scaffold Surface with Non-Dominant Hand",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049645/1KYoyLX55fy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a156",
"title": "Holding Virtual Objects Using a Tablet for Tangible 3D Sketching in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a156/1gysiAZnF16",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090491",
"title": "Precision vs. Power Grip: A Comparison of Pen Grip Styles for Selection in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090491/1jIxqBC6XqU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a361",
"title": "Pen-based Interaction with Spreadsheets in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a361/1pysxojAVAk",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a220",
"title": "Comparing Virtual Constraints and a Physical Stylus for Planar Writing and Drawing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a220/1tnXoZM7Cw0",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a749",
"title": "Demonstrating High-Precision and High-Fidelity Digital Inking for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a749/1tnXvUPX7yg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNANkoaf",
"title": "2016 10th International Conference on Next-Generation Mobile Applications, Security and Technologies (NGMAST)",
"acronym": "ngmast",
"groupId": "1001782",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMZpyR",
"doi": "10.1109/NGMAST.2016.13",
"title": "Immersive Virtual Reality as a Supplement in the Rehabilitation Program of Post-Stroke Patients",
"normalizedTitle": "Immersive Virtual Reality as a Supplement in the Rehabilitation Program of Post-Stroke Patients",
"abstract": "In a time when prolonging the quality of life better is not just through long-established medical practices but by technology assisting humans, various projects are challenging the traditional methods of caring for a patient. In stroke, desktop virtual environments (DVEs) are being used to supplement the patient's rehabilitation. The type of rehabilitation provides another means for patients to actively participate in the rehabilitation program through motion mirroring using their television or computer screens. However, immersive virtual reality (VR) is also gaining popularity and is challenging the common DVEs in its effectivity through involving more sensory stimulation of the nervous system. The project aims to answer the questions: Would immersion of a patient in VR positively help in their rehabilitation and what are the patient's needs that has to be addressed for it to be effective? To supplement the patients in their rehabilitation program, a mobile VR application is created using VR peripherals available commercially. The basic principle is that immersion in VR will add more sensory and cognitive stimuli to the stroke patient's traditional hospital rehabilitation. Findings of the study show positive effects of including immersive VR in the rehabilitation program of post-stroke patients. In the present, little information is available on the effects of immersive VR in motor rehabilitation. The study quantifies and qualifies its use specifically in the motor rehabilitation of stroke patients.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In a time when prolonging the quality of life better is not just through long-established medical practices but by technology assisting humans, various projects are challenging the traditional methods of caring for a patient. In stroke, desktop virtual environments (DVEs) are being used to supplement the patient's rehabilitation. The type of rehabilitation provides another means for patients to actively participate in the rehabilitation program through motion mirroring using their television or computer screens. However, immersive virtual reality (VR) is also gaining popularity and is challenging the common DVEs in its effectivity through involving more sensory stimulation of the nervous system. The project aims to answer the questions: Would immersion of a patient in VR positively help in their rehabilitation and what are the patient's needs that has to be addressed for it to be effective? To supplement the patients in their rehabilitation program, a mobile VR application is created using VR peripherals available commercially. The basic principle is that immersion in VR will add more sensory and cognitive stimuli to the stroke patient's traditional hospital rehabilitation. Findings of the study show positive effects of including immersive VR in the rehabilitation program of post-stroke patients. In the present, little information is available on the effects of immersive VR in motor rehabilitation. The study quantifies and qualifies its use specifically in the motor rehabilitation of stroke patients.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In a time when prolonging the quality of life better is not just through long-established medical practices but by technology assisting humans, various projects are challenging the traditional methods of caring for a patient. In stroke, desktop virtual environments (DVEs) are being used to supplement the patient's rehabilitation. The type of rehabilitation provides another means for patients to actively participate in the rehabilitation program through motion mirroring using their television or computer screens. However, immersive virtual reality (VR) is also gaining popularity and is challenging the common DVEs in its effectivity through involving more sensory stimulation of the nervous system. The project aims to answer the questions: Would immersion of a patient in VR positively help in their rehabilitation and what are the patient's needs that has to be addressed for it to be effective? To supplement the patients in their rehabilitation program, a mobile VR application is created using VR peripherals available commercially. The basic principle is that immersion in VR will add more sensory and cognitive stimuli to the stroke patient's traditional hospital rehabilitation. Findings of the study show positive effects of including immersive VR in the rehabilitation program of post-stroke patients. In the present, little information is available on the effects of immersive VR in motor rehabilitation. The study quantifies and qualifies its use specifically in the motor rehabilitation of stroke patients.",
"fno": "07801465",
"keywords": [
"Medical Computing",
"Patient Rehabilitation",
"Virtual Reality",
"Immersive Virtual Reality",
"Post Stroke Patient Rehabilitation Program",
"DV Es",
"Desktop Virtual Environments",
"Motion Mirroring",
"Computer Screens",
"Mobile VR Application",
"Cognitive Stimuli",
"Hospital Rehabilitation",
"Motor Rehabilitation",
"Hospitals",
"Legged Locomotion",
"Virtual Reality",
"Atmospheric Measurements",
"Particle Measurements",
"Urban Areas",
"Head",
"Immersive Virtual Reality",
"Stroke Rehabilitation",
"Motor Rehabilitation",
"Virtual Reality Application"
],
"authors": [
{
"affiliation": null,
"fullName": "Maria Anna V. San Luis",
"givenName": "Maria Anna V. San",
"surname": "Luis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rowel O. Atienza",
"givenName": "Rowel O.",
"surname": "Atienza",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Amado M. San Luis",
"givenName": "Amado M. San",
"surname": "Luis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ngmast",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-08-01T00:00:00",
"pubType": "proceedings",
"pages": "47-52",
"year": "2016",
"issn": "2161-2897",
"isbn": "978-1-5090-0949-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07801464",
"articleId": "12OmNvT2p2v",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07801466",
"articleId": "12OmNzgeLDY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/memea/2009/3598/0/05167961",
"title": "Evaluating the post-stroke patients progress using an Augmented Reality Rehabilitation system",
"doi": null,
"abstractUrl": "/proceedings-article/memea/2009/05167961/12OmNAZx8NS",
"parentPublication": {
"id": "proceedings/memea/2009/3598/0",
"title": "Medical Measurement and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161053",
"title": "VR Aided Motor Training for Post-Stroke Rehabilitation: System Design, Clinical Test, Methodology for Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161053/12OmNBZHiiF",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/segah/2013/6165/0/06665319",
"title": "Chances for serious games in rehabilitation of stroke patients on the example of utilizing the Wii Fit Balance Board",
"doi": null,
"abstractUrl": "/proceedings-article/segah/2013/06665319/12OmNCcKQK9",
"parentPublication": {
"id": "proceedings/segah/2013/6165/0",
"title": "2013 IEEE 2nd International Conference on Serious Games and Applications for Health (SeGAH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571105",
"title": "Investigating the Trend of Virtual Reality-Based Stroke Rehabilitation Systems",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571105/12OmNCvLXZ2",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmcsn/2016/1093/0/1093a044",
"title": "An Interactive Upper-Limb Post-Stroke Rehabilitation System Integrating BCI-based Attention Monitoring and Virtual Reality Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/cmcsn/2016/1093a044/12OmNvq5jAC",
"parentPublication": {
"id": "proceedings/cmcsn/2016/1093/0",
"title": "2016 Third International Conference on Computing Measurement Control and Sensor Network (CMCSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/segah/2017/5482/0/07939262",
"title": "Robotic-assisted serious game for motor and cognitive post-stroke rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/segah/2017/07939262/12OmNz2TCz5",
"parentPublication": {
"id": "proceedings/segah/2017/5482/0",
"title": "2017 IEEE 5th International Conference on Serious Games and Applications for Health (SeGAH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/i-span/2018/8534/0/853400a253",
"title": "An Upper Extremity Rehabilitation System Using Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/i-span/2018/853400a253/17D45WWzW5h",
"parentPublication": {
"id": "proceedings/i-span/2018/8534/0",
"title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864513",
"title": "Virtual Reality Environment for the Cognitive Rehabilitation of Stroke Patients",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864513/1e5ZtlRT3SE",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ubi-media/2019/2820/0/282000a192",
"title": "A Kinect-Based System for Stroke Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/ubi-media/2019/282000a192/1iESfZs2fF6",
"parentPublication": {
"id": "proceedings/ubi-media/2019/2820/0",
"title": "2019 Twelfth International Conference on Ubi-Media Computing (Ubi-Media)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icectt/2020/9928/0/992800a074",
"title": "Game Scene Construction for Lower Limb Rehabilitation Robot Based on Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icectt/2020/992800a074/1oa5enWMmL6",
"parentPublication": {
"id": "proceedings/icectt/2020/9928/0",
"title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1fHGzrmKhCU",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1fHGAqGFzP2",
"doi": "10.1109/ACII.2019.8925528",
"title": "Depression Detection from Electroencephalogram Signals Induced by Affective Auditory Stimuli",
"normalizedTitle": "Depression Detection from Electroencephalogram Signals Induced by Affective Auditory Stimuli",
"abstract": "Depression is a mental disorder characterized by emotional and cognitive dysfunction, which appears a state of low mood and aversion to activity. Depression can affect a person's thoughts, behavior, feelings, and sense of well-being. Depression is projected to be the second major life-threatening illness in 2020 by World Health Organization (WHO). Thus, it is urgent to detect and treat depression. Electroencephalogram (EEG) signals, which objectively reflect the working status of the human brain, are considered as promising physiological tools for depression detection. Negatively biased processing of affective stimuli in depression has been proven. In order to detect depression more effectively, we proposed an affective auditory stimuli induced depression detection method from EEG signals. In this method, we applied negative, positive and neutral affective auditory stimuli with several frequency selected from the International Affective Digitized Sounds (IADS-2) to induce negative affective bias in patients with depression. We synchronously collected EEG signals with three electrodes located on the prefrontal lobe (Fpl, Fpz, and Fp2), then extracted efficacious features by Empirical Mode Decomposition (EMD) based feature extraction method to detect depression effectively. The results of the proposed method showed that high-frequency affective auditory stimuli were more effective in depression detection and the frequency of affective auditory stimuli was a crucial property, which can influence the effectiveness of affective auditory stimuli in depression detection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Depression is a mental disorder characterized by emotional and cognitive dysfunction, which appears a state of low mood and aversion to activity. Depression can affect a person's thoughts, behavior, feelings, and sense of well-being. Depression is projected to be the second major life-threatening illness in 2020 by World Health Organization (WHO). Thus, it is urgent to detect and treat depression. Electroencephalogram (EEG) signals, which objectively reflect the working status of the human brain, are considered as promising physiological tools for depression detection. Negatively biased processing of affective stimuli in depression has been proven. In order to detect depression more effectively, we proposed an affective auditory stimuli induced depression detection method from EEG signals. In this method, we applied negative, positive and neutral affective auditory stimuli with several frequency selected from the International Affective Digitized Sounds (IADS-2) to induce negative affective bias in patients with depression. We synchronously collected EEG signals with three electrodes located on the prefrontal lobe (Fpl, Fpz, and Fp2), then extracted efficacious features by Empirical Mode Decomposition (EMD) based feature extraction method to detect depression effectively. The results of the proposed method showed that high-frequency affective auditory stimuli were more effective in depression detection and the frequency of affective auditory stimuli was a crucial property, which can influence the effectiveness of affective auditory stimuli in depression detection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Depression is a mental disorder characterized by emotional and cognitive dysfunction, which appears a state of low mood and aversion to activity. Depression can affect a person's thoughts, behavior, feelings, and sense of well-being. Depression is projected to be the second major life-threatening illness in 2020 by World Health Organization (WHO). Thus, it is urgent to detect and treat depression. Electroencephalogram (EEG) signals, which objectively reflect the working status of the human brain, are considered as promising physiological tools for depression detection. Negatively biased processing of affective stimuli in depression has been proven. In order to detect depression more effectively, we proposed an affective auditory stimuli induced depression detection method from EEG signals. In this method, we applied negative, positive and neutral affective auditory stimuli with several frequency selected from the International Affective Digitized Sounds (IADS-2) to induce negative affective bias in patients with depression. We synchronously collected EEG signals with three electrodes located on the prefrontal lobe (Fpl, Fpz, and Fp2), then extracted efficacious features by Empirical Mode Decomposition (EMD) based feature extraction method to detect depression effectively. The results of the proposed method showed that high-frequency affective auditory stimuli were more effective in depression detection and the frequency of affective auditory stimuli was a crucial property, which can influence the effectiveness of affective auditory stimuli in depression detection.",
"fno": "08925528",
"keywords": [
"Cognition",
"Electroencephalography",
"Emotion Recognition",
"Feature Extraction",
"Medical Disorders",
"Medical Signal Processing",
"Neurophysiology",
"High Frequency Affective Auditory Stimuli",
"Depression Detection Method",
"World Health Organization",
"EEG Signals",
"Empirical Mode Decomposition Based Feature Extraction Method",
"Prefrontal Lobe",
"Electroencephalography",
"Feature Extraction",
"Hospitals",
"Databases",
"Psychology",
"Electrodes",
"Physiology",
"Depression Detection",
"EEG",
"Affective Stimuli",
"High Frequency"
],
"authors": [
{
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing, Lanzhou University,Lanzhou,China",
"fullName": "Jian Shen",
"givenName": "Jian",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing, Lanzhou University,Lanzhou,China",
"fullName": "Xiaowei Zhang",
"givenName": "Xiaowei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing, Lanzhou University,Lanzhou,China",
"fullName": "Junlei Li",
"givenName": "Junlei",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Normal University,Faculty of Psychology,Beijing,China",
"fullName": "Yuanxi Li",
"givenName": "Yuanxi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Anding Hospital, Capital Medical University,Beijing,China",
"fullName": "Lei Feng",
"givenName": "Lei",
"surname": "Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Anding Hospital, Capital Medical University,Beijing,China",
"fullName": "Changqing Hu",
"givenName": "Changqing",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Third People's Hospital,Tianshui,China",
"fullName": "Zhijie Ding",
"givenName": "Zhijie",
"surname": "Ding",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Third People's Hospital,Tianshui,China",
"fullName": "Gang Wang",
"givenName": "Gang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing, Lanzhou University,Lanzhou,China",
"fullName": "Bin Hu",
"givenName": "Bin",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-09-01T00:00:00",
"pubType": "proceedings",
"pages": "76-82",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3888-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08925522",
"articleId": "1fHGI3fX9Wo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08925496",
"articleId": "1fHGC54KKLS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2017/1710/0/1710a469",
"title": "Are Elderly Less Responsive to Emotional Stimuli? An EEG-based Study across Pleasant, Unpleasant and Neutral Greek Words",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a469/12OmNAXPy12",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2016/1611/0/07822696",
"title": "Pervasive EEG diagnosis of depression using Deep Belief Network with three-electrodes EEG collector",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822696/12OmNAndien",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217946",
"title": "A novel depression detection method based on pervasive EEG and EEG splitting criterion",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217946/12OmNyoiZ50",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2020/03/08279420",
"title": "A Case-Based Reasoning Model for Depression Based on Three-Electrode EEG Data",
"doi": null,
"abstractUrl": "/journal/ta/2020/03/08279420/13rRUxC0SCs",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2020/01/08283734",
"title": "Personalised, Multi-Modal, Affective State Detection for Hybrid Brain-Computer Music Interfacing",
"doi": null,
"abstractUrl": "/journal/ta/2020/01/08283734/13rRUygBwgj",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09767586",
"title": "Mutual Information Based Fusion Model (MIBFM): Mild Depression Recognition Using EEG and Pupil Area Signals",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09767586/1D4H8whwd9e",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/01/08798762",
"title": "An Improved Empirical Mode Decomposition of Electroencephalogram Signals for Depression Detection",
"doi": null,
"abstractUrl": "/journal/ta/2022/01/08798762/1cumIW39lsY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2019/6303/0/09162347",
"title": "Bimodal ERP Study with Auditory-Visual Stimuli",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2019/09162347/1m6hOpdfTVu",
"parentPublication": {
"id": "proceedings/csde/2019/6303/0",
"title": "2019 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09339987",
"title": "Effective Connectivity Based EEG Revealing the Inhibitory Deficits for Distracting Stimuli in Major Depression Disorders",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09339987/1qL4Oax1GNO",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313270",
"title": "EEG Based Depression Recognition by Combining Functional Brain Network and Traditional Biomarkers",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313270/1qmg9EBgOOc",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxbiGNwCQ",
"doi": "10.1109/VR46266.2020.00067",
"title": "The Impact of Multi-sensory Stimuli on Confidence Levels for Perceptual-cognitive Tasks in VR",
"normalizedTitle": "The Impact of Multi-sensory Stimuli on Confidence Levels for Perceptual-cognitive Tasks in VR",
"abstract": "Supporting perceptual-cognitive tasks is an important part of our daily lives. We use rich, multi-sensory feedback through sight, sound, touch, smell, and taste to support better perceptual-cognitive things we do, such as sports, cooking, and searching for a location, and to increase our confidence in performing those tasks in daily life. Same with real life, the demand for perceptual-cognitive tasks exists in serious VR simulations such as surgical or safety training systems. However, in contrast to real life, VR simulations are typically limited to visual and auditory cues, while sometimes adding simple tactile feedback. This could make it difficult to make confident decisions in VR.In this paper, we investigate the effects of multi-sensory stimuli, namely visuals, audio, two types of tactile (floor vibration and wind), and smell in terms of the confidence levels on a location-matching task which requires a combination of perceptual and cognitive work inside a virtual environment. We also measured the level of presence when participants visited virtual places with different combinations of sensory feedback. Our results show that our multi-sensory VR system was superior to a typical VR system (vision and audio) in terms of the sense of presence and user preference. However, the subjective confidence levels were higher in the typical VR system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Supporting perceptual-cognitive tasks is an important part of our daily lives. We use rich, multi-sensory feedback through sight, sound, touch, smell, and taste to support better perceptual-cognitive things we do, such as sports, cooking, and searching for a location, and to increase our confidence in performing those tasks in daily life. Same with real life, the demand for perceptual-cognitive tasks exists in serious VR simulations such as surgical or safety training systems. However, in contrast to real life, VR simulations are typically limited to visual and auditory cues, while sometimes adding simple tactile feedback. This could make it difficult to make confident decisions in VR.In this paper, we investigate the effects of multi-sensory stimuli, namely visuals, audio, two types of tactile (floor vibration and wind), and smell in terms of the confidence levels on a location-matching task which requires a combination of perceptual and cognitive work inside a virtual environment. We also measured the level of presence when participants visited virtual places with different combinations of sensory feedback. Our results show that our multi-sensory VR system was superior to a typical VR system (vision and audio) in terms of the sense of presence and user preference. However, the subjective confidence levels were higher in the typical VR system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Supporting perceptual-cognitive tasks is an important part of our daily lives. We use rich, multi-sensory feedback through sight, sound, touch, smell, and taste to support better perceptual-cognitive things we do, such as sports, cooking, and searching for a location, and to increase our confidence in performing those tasks in daily life. Same with real life, the demand for perceptual-cognitive tasks exists in serious VR simulations such as surgical or safety training systems. However, in contrast to real life, VR simulations are typically limited to visual and auditory cues, while sometimes adding simple tactile feedback. This could make it difficult to make confident decisions in VR.In this paper, we investigate the effects of multi-sensory stimuli, namely visuals, audio, two types of tactile (floor vibration and wind), and smell in terms of the confidence levels on a location-matching task which requires a combination of perceptual and cognitive work inside a virtual environment. We also measured the level of presence when participants visited virtual places with different combinations of sensory feedback. Our results show that our multi-sensory VR system was superior to a typical VR system (vision and audio) in terms of the sense of presence and user preference. However, the subjective confidence levels were higher in the typical VR system.",
"fno": "09089548",
"keywords": [
"Vibrations",
"Multisensory Integration",
"Cognition",
"Virtual Reality",
"User Interfaces",
"Wind",
"Multisensory VR",
"Perception",
"Confidence",
"Cognition",
"Floor Vibration",
"Wind",
"Smell"
],
"authors": [
{
"affiliation": "University of Canterbury,Human Interface Technology Lab NZ",
"fullName": "Sungchul Jung",
"givenName": "Sungchul",
"surname": "Jung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,Human Interface Technology Lab NZ",
"fullName": "Andrew L. Wood",
"givenName": "Andrew L.",
"surname": "Wood",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,School of Product Design",
"fullName": "Simon Hoermann",
"givenName": "Simon",
"surname": "Hoermann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,School of Product Design",
"fullName": "Pramuditha L. Abhayawardhana",
"givenName": "Pramuditha L.",
"surname": "Abhayawardhana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,Human Interface Technology Lab NZ",
"fullName": "Robert W. Lindeman",
"givenName": "Robert W.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "463-472",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089480",
"articleId": "1jIx9NUNYPu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089568",
"articleId": "1jIxd8GJrri",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acssc/1994/6405/2/00471673",
"title": "Multidivisional graduate education program in sensory engineering",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1994/00471673/12OmNCga1Vc",
"parentPublication": {
"id": "proceedings/acssc/1994/6405/1",
"title": "Proceedings of 1994 28th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131744",
"title": "Haptic ChairIO: A system to study the effect of wind and floor vibration feedback on spatial orientation in VEs",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131744/12OmNzBwGKT",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2008/3381/0/3381a354",
"title": "Perception of Virtual Multi-Sensory Objects: Some Musings on the Enactive Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/3381a354/12OmNzFMFj9",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07412776",
"title": "Rhythmic Haptic Stimuli Improve Short-Term Attention",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07412776/13rRUypp57L",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714043",
"title": "Studying the Effects of Congruence of Auditory and Visual Stimuli on Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714043/1B0Y2dBeUi4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a737",
"title": "All Shook Up: The Impact of Floor Vibration in Symmetric and Asymmetric Immersive Multi-user VR Gaming Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a737/1CJbKx7Zhyo",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a126",
"title": "Are you Seeing what I'm Seeing?: Perceptual Issues with Digital Twins in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a126/1CJdHG4lLfa",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797781",
"title": "Effect of Sensory Conflict and Postural Instability on Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797781/1cJ0Sg2UoQE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089462",
"title": "VR Bridges: Simulating Smooth Uneven Surfaces in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089462/1jIxeZPD4LS",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090475",
"title": "Virtual Reality for Safety and Independence in Everyday Activities",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090475/1jIxjhubDfq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1m6hKz6DL68",
"title": "2019 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"acronym": "csde",
"groupId": "1837144",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1m6hOpdfTVu",
"doi": "10.1109/CSDE48274.2019.9162347",
"title": "Bimodal ERP Study with Auditory-Visual Stimuli",
"normalizedTitle": "Bimodal ERP Study with Auditory-Visual Stimuli",
"abstract": "Dipole source analysis is applied to model brain generators of surface-recorded evoked potentials, epileptiform activity, and event-related potentials (ERP). The aim of this study was to explore brain activity of interaction between bimodal sensory cognition. Seven healthy volunteers were recruited in the study and ERP to these stimuli were recorded by 64 electrodes EEG recording system. Subjects were exposed to either the auditory and the visual stimulus alone or the combined auditory-visual (AV) stimuli. A source localization analysis was performed across conditions over initial, early and later temporal stages. The source locations across conditions were contrasted over similar time periods, indicating that source location of the bimodal auditory-visual (AV) stimuli differed from the sum of source locations from the auditory and the visual stimulus alone. It proves that there exists interplay in the brain in the bimodal auditory-visual stimuli paradigm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dipole source analysis is applied to model brain generators of surface-recorded evoked potentials, epileptiform activity, and event-related potentials (ERP). The aim of this study was to explore brain activity of interaction between bimodal sensory cognition. Seven healthy volunteers were recruited in the study and ERP to these stimuli were recorded by 64 electrodes EEG recording system. Subjects were exposed to either the auditory and the visual stimulus alone or the combined auditory-visual (AV) stimuli. A source localization analysis was performed across conditions over initial, early and later temporal stages. The source locations across conditions were contrasted over similar time periods, indicating that source location of the bimodal auditory-visual (AV) stimuli differed from the sum of source locations from the auditory and the visual stimulus alone. It proves that there exists interplay in the brain in the bimodal auditory-visual stimuli paradigm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dipole source analysis is applied to model brain generators of surface-recorded evoked potentials, epileptiform activity, and event-related potentials (ERP). The aim of this study was to explore brain activity of interaction between bimodal sensory cognition. Seven healthy volunteers were recruited in the study and ERP to these stimuli were recorded by 64 electrodes EEG recording system. Subjects were exposed to either the auditory and the visual stimulus alone or the combined auditory-visual (AV) stimuli. A source localization analysis was performed across conditions over initial, early and later temporal stages. The source locations across conditions were contrasted over similar time periods, indicating that source location of the bimodal auditory-visual (AV) stimuli differed from the sum of source locations from the auditory and the visual stimulus alone. It proves that there exists interplay in the brain in the bimodal auditory-visual stimuli paradigm.",
"fno": "09162347",
"keywords": [
"Auditory Evoked Potentials",
"Bioelectric Potentials",
"Biomedical Electrodes",
"Brain",
"Cognition",
"Electroencephalography",
"Medical Signal Processing",
"Neurophysiology",
"Visual Evoked Potentials",
"Surface Recorded Evoked Potentials",
"Epileptiform Activity",
"Event Related Potentials",
"Bimodal Sensory Cognition",
"Seven Healthy Volunteers",
"64 Electrodes EEG Recording System",
"Visual Stimulus",
"Combined Auditory Visual",
"Source Localization Analysis",
"Source Location",
"Bimodal Auditory Visual Stimuli Paradigm",
"Bimodal ERP Study",
"Dipole Source Analysis",
"Model Brain Generators",
"Visualization",
"Thalamus",
"Electrodes",
"Brain Modeling",
"Electroencephalography",
"Occipital Lobe",
"Electric Potential",
"Dipole Source Analysis",
"Event Related Potentials"
],
"authors": [
{
"affiliation": "Institute of Biomedical Enginnering, Chinese Academy of Meidical Sciences,Tianjin,China",
"fullName": "Xiaobo Xie",
"givenName": "Xiaobo",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Biomedical Enginnering, Chinese Academy of Meidical Sciences,Tianjin,China",
"fullName": "Hongyan Cui",
"givenName": "Hongyan",
"surname": "Cui",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csde",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-12-01T00:00:00",
"pubType": "proceedings",
"pages": "1-3",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-6303-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09162386",
"articleId": "1m6hLlOgyOs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09162401",
"articleId": "1m6hN5Fyoco",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2017/1710/0/1710a469",
"title": "Are Elderly Less Responsive to Emotional Stimuli? An EEG-based Study across Pleasant, Unpleasant and Neutral Greek Words",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a469/12OmNAXPy12",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itcc/2003/1916/0/19160582",
"title": "Multimedia E-Mail Data Browsing: The Synergistic Use of Various Forms of Auditory Stimuli",
"doi": null,
"abstractUrl": "/proceedings-article/itcc/2003/19160582/12OmNBNM93A",
"parentPublication": {
"id": "proceedings/itcc/2003/1916/0",
"title": "Information Technology: Coding and Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2001/0948/0/09480241",
"title": "Auditory Motion Induced by Visual Motion and Its Dependence on Stimulus Size",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2001/09480241/12OmNBh8gTy",
"parentPublication": {
"id": "proceedings/vr/2001/0948/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a806",
"title": "Auditory Brain-Computer Interface Paradigm with Head Related Impulse Response-Based Spatial Cues",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a806/12OmNqBtiR3",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2012/4702/0/4702a654",
"title": "ERP Approach: What Could We Learn From?",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a654/12OmNrkjViS",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itcc/2002/1506/0/15060190",
"title": "Utilising Audio-Visual Stimuli in Interactive Information Systems: A Two Domain Investigation on Auditory Metaphors",
"doi": null,
"abstractUrl": "/proceedings-article/itcc/2002/15060190/12OmNyrqzxg",
"parentPublication": {
"id": "proceedings/itcc/2002/1506/0",
"title": "Information Technology: Coding and Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714043",
"title": "Studying the Effects of Congruence of Auditory and Visual Stimuli on Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714043/1B0Y2dBeUi4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925528",
"title": "Depression Detection from Electroencephalogram Signals Induced by Affective Auditory Stimuli",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925528/1fHGAqGFzP2",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09339987",
"title": "Effective Connectivity Based EEG Revealing the Inhibitory Deficits for Distracting Stimuli in Major Depression Disorders",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09339987/1qL4Oax1GNO",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a465",
"title": "Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a465/1yeQA0ONooU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1tMztXW",
"doi": "10.1109/VR.2018.8446510",
"title": "Comparing Interface Affordances for Controlling a Push Broom in VR",
"normalizedTitle": "Comparing Interface Affordances for Controlling a Push Broom in VR",
"abstract": "This study explores how VR controller interfaces affect how participants hold a virtual push broom in VR. We aim to understand how the affordances available with current VR controllers and a custom broom VR controller impact user hand placement in a visual VR broom task. We compare hand placement in two VR conditions against hand placement holding a real push broom. Our goal is to understand the roles that controllers have on recreating physically accurate actions in VR training scenarios. The results from this initial pilot show an effect of the broom controller condition but also that the order in which some of the conditions were presented to subjects affected the way subjects held the VR and real push brooms in subsequent actions. Future work will continue to explore how controller affordance may impact the role of training in VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study explores how VR controller interfaces affect how participants hold a virtual push broom in VR. We aim to understand how the affordances available with current VR controllers and a custom broom VR controller impact user hand placement in a visual VR broom task. We compare hand placement in two VR conditions against hand placement holding a real push broom. Our goal is to understand the roles that controllers have on recreating physically accurate actions in VR training scenarios. The results from this initial pilot show an effect of the broom controller condition but also that the order in which some of the conditions were presented to subjects affected the way subjects held the VR and real push brooms in subsequent actions. Future work will continue to explore how controller affordance may impact the role of training in VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study explores how VR controller interfaces affect how participants hold a virtual push broom in VR. We aim to understand how the affordances available with current VR controllers and a custom broom VR controller impact user hand placement in a visual VR broom task. We compare hand placement in two VR conditions against hand placement holding a real push broom. Our goal is to understand the roles that controllers have on recreating physically accurate actions in VR training scenarios. The results from this initial pilot show an effect of the broom controller condition but also that the order in which some of the conditions were presented to subjects affected the way subjects held the VR and real push brooms in subsequent actions. Future work will continue to explore how controller affordance may impact the role of training in VR.",
"fno": "08446510",
"keywords": [
"Virtual Reality",
"Custom Broom VR",
"Controller Impact User Hand Placement",
"Visual VR Broom Task",
"VR Conditions",
"VR Training Scenarios",
"Broom Controller Condition",
"Controller Affordance",
"Interface Affordances",
"VR Controller Interfaces",
"Virtual Push Broom",
"Current VR Controllers",
"Training",
"Task Analysis",
"Human Computer Interaction",
"Aerospace Electronics",
"Virtual Reality",
"Visualization",
"Electronic Mail",
"Human Centered Computing Human Computer Interaction HCI Interaction Devices Human Centered Computing Human Computer Interaction HCI Empirical Studies In HCI"
],
"authors": [
{
"affiliation": "Department of Computer Science, University of Minnesota, Duluth",
"fullName": "Noah Miller",
"givenName": "Noah",
"surname": "Miller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Minnesota, Duluth",
"fullName": "Pete Willemsen",
"givenName": "Pete",
"surname": "Willemsen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical and Industrial Engineering, University of Minnesota, Duluth",
"fullName": "Robert Feyen",
"givenName": "Robert",
"surname": "Feyen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "635-636",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446504",
"articleId": "13bd1gQYgEn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446057",
"articleId": "13bd1gFCjsa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223386",
"title": "Comparing the performance of natural, semi-natural, and non-natural locomotion techniques in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223386/12OmNx9nGM1",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446453",
"title": "Force Push: Exploring Expressive Gesture-to-Force Mappings for Indirect 3D Object Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446453/13bd1sx4Zt0",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a596",
"title": "VR Wayfinding Training for People with Visual Impairment using VR Treadmill and VR Tracker",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a596/1CJf4aHcqoU",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a905",
"title": "Haptics in VR Using Origami-Augmented Drones",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a905/1J7WrPcWIVO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a384",
"title": "A Study of Usability Improvement in Immersive VR Programming Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a384/1fHklNWpumY",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089442",
"title": "Effects of Virtual Hand Representation on Interaction and Embodiment in HMD-based Virtual Environments Using Controllers",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089442/1jIxe7ldiE0",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090611",
"title": "Pain Experience in Social VR: The Competing Effect on Objective Pain Tolerance and Subjective Pain Perception",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090611/1jIxokdBogo",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a713",
"title": "Exploring Body Gestures for Small Object Selection in Dense Environment in HMD VR for Data Visualization Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a713/1tnX59fALbG",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a251",
"title": "A Japanese Character Flick-Input Interface for Entering Text in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a251/1yfxO7CNnQk",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKisy",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VW8bq8",
"doi": "10.1109/ISMAR.2018.00034",
"title": "The Impact of an Accurate Vertical Localization with HRTFs on Short Explorations of Immersive Virtual Reality Scenarios",
"normalizedTitle": "The Impact of an Accurate Vertical Localization with HRTFs on Short Explorations of Immersive Virtual Reality Scenarios",
"abstract": "Achieving a full 3D auditory experience with head-related transfer functions (HRTFs) is still one of the main challenges of spatial audio rendering. HRTFs capture the listener's acoustic effects and personal perception, allowing immersion in virtual reality (VR) applications. This paper aims to investigate the connection between listener sensitivity in vertical localization cues and experienced presence, spatial audio quality, and attention. Two VR experiments with head-mounted display (HMD) and animated visual avatar are proposed: (i) a screening test aiming to evaluate the participants' localization performance with HRTFs for a non-visible spatialized audio source, and (ii) a 2 minute free exploration of a VR scene with five audiovisual sources in a both non-spatialized (2D stereo panning) and spatialized (free-field HRTF rendering) listening conditions. The screening test allows a distinction between good and bad localizers. The second one shows that no biases are introduced in the quality of the experience (QoE) due to different audio rendering methods; more interestingly, good localizers perceive a lower audio latency and they are less involved in the visual aspects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Achieving a full 3D auditory experience with head-related transfer functions (HRTFs) is still one of the main challenges of spatial audio rendering. HRTFs capture the listener's acoustic effects and personal perception, allowing immersion in virtual reality (VR) applications. This paper aims to investigate the connection between listener sensitivity in vertical localization cues and experienced presence, spatial audio quality, and attention. Two VR experiments with head-mounted display (HMD) and animated visual avatar are proposed: (i) a screening test aiming to evaluate the participants' localization performance with HRTFs for a non-visible spatialized audio source, and (ii) a 2 minute free exploration of a VR scene with five audiovisual sources in a both non-spatialized (2D stereo panning) and spatialized (free-field HRTF rendering) listening conditions. The screening test allows a distinction between good and bad localizers. The second one shows that no biases are introduced in the quality of the experience (QoE) due to different audio rendering methods; more interestingly, good localizers perceive a lower audio latency and they are less involved in the visual aspects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Achieving a full 3D auditory experience with head-related transfer functions (HRTFs) is still one of the main challenges of spatial audio rendering. HRTFs capture the listener's acoustic effects and personal perception, allowing immersion in virtual reality (VR) applications. This paper aims to investigate the connection between listener sensitivity in vertical localization cues and experienced presence, spatial audio quality, and attention. Two VR experiments with head-mounted display (HMD) and animated visual avatar are proposed: (i) a screening test aiming to evaluate the participants' localization performance with HRTFs for a non-visible spatialized audio source, and (ii) a 2 minute free exploration of a VR scene with five audiovisual sources in a both non-spatialized (2D stereo panning) and spatialized (free-field HRTF rendering) listening conditions. The screening test allows a distinction between good and bad localizers. The second one shows that no biases are introduced in the quality of the experience (QoE) due to different audio rendering methods; more interestingly, good localizers perceive a lower audio latency and they are less involved in the visual aspects.",
"fno": "745900a090",
"keywords": [
"Audio Signal Processing",
"Audio Visual Systems",
"Avatars",
"Hearing",
"Helmet Mounted Displays",
"Quality Of Experience",
"Rendering Computer Graphics",
"Transfer Functions",
"Virtual Reality",
"Bad Localizers",
"Accurate Vertical Localization",
"Immersive Virtual Reality Scenarios",
"3 D Auditory Experience",
"Head Related Transfer Functions",
"Spatial Audio Rendering",
"HRT Fs Capture",
"Personal Perception",
"Virtual Reality Applications",
"Listener Sensitivity",
"Vertical Localization Cues",
"Spatial Audio Quality",
"VR Experiments",
"Head Mounted Display",
"Visual Avatar",
"Screening Test",
"Nonvisible Spatialized Audio Source",
"VR Scene",
"Audiovisual Sources",
"2 D Stereo Panning",
"HRTF Rendering",
"Audio Rendering Methods",
"Time 2 0 Min",
"Rendering Computer Graphics",
"Acoustics",
"Virtual Environments",
"Visualization",
"Solid Modeling",
"Ear",
"Virtual Reality",
"Spatial Audio Rendering",
"Head Related Transfer Function",
"Auditory Vertical Localization",
"Personalization",
"Quality Of The Experience"
],
"authors": [
{
"affiliation": null,
"fullName": "Michele Geronazzo",
"givenName": "Michele",
"surname": "Geronazzo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Erik Sikström",
"givenName": "Erik",
"surname": "Sikström",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jari Kleimola",
"givenName": "Jari",
"surname": "Kleimola",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Federico Avanzini",
"givenName": "Federico",
"surname": "Avanzini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Amalia de Götzen",
"givenName": "Amalia",
"surname": "de Götzen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stefania Serafin",
"givenName": "Stefania",
"surname": "Serafin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "90-97",
"year": "2018",
"issn": "1554-7868",
"isbn": "978-1-5386-7459-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "745900a080",
"articleId": "17D45W1Oa1E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "745900a098",
"articleId": "17D45WYQJag",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sive/2015/1969/0/07361293",
"title": "Evaluating vertical localization performance of 3D sound rendering models with a perceptual metric",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2015/07361293/12OmNAObbB4",
"parentPublication": {
"id": "proceedings/sive/2015/1969/0",
"title": "2015 IEEE 2nd VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948409",
"title": "P-HRTF: Efficient personalized HRTF computation for high-fidelity spatial sound",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948409/12OmNAT0mSm",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2015/6850/0/6850a225",
"title": "Extracting Anthropometric Parameters from a Scanned 3D-Head-Model",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2015/6850a225/12OmNCesrbJ",
"parentPublication": {
"id": "proceedings/icisce/2015/6850/0",
"title": "2015 2nd International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012065",
"title": "Realistic audio in immersive video conferencing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012065/12OmNqIzh2i",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2014/5781/0/07006282",
"title": "Audio-visual attractors for capturing attention to the screens when walking in CAVE systems",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2014/07006282/12OmNvT2oZl",
"parentPublication": {
"id": "proceedings/sive/2014/5781/0",
"title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07383327",
"title": "Efficient HRTF-based Spatial Audio for Area and Volumetric Sources",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07383327/13rRUyfKIHQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577195",
"title": "3D Sound Rendering in a Virtual Environment to Evaluate Pedestrian Street Crossing Decisions at a Roundabout",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577195/17D45XeKgwR",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798177",
"title": "Perceptual Study of Near-Field Binaural Audio Rendering in Six-Degrees-of-Freedom Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798177/1cJ13xpYvE4",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090531",
"title": "Immersive sonification of protein surface",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090531/1jIxzEw3bb2",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090553",
"title": "Scene-aware Sound Rendering in Virtual and Real Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090553/1jIxzQzr0EU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1EaP2tcnSIo",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"acronym": "icse-companion",
"groupId": "1002125",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1EaP2BazwaI",
"doi": "10.1109/ICSE-Companion55297.2022.9793753",
"title": "VRTest: An Extensible Framework for Automatic Testing of Virtual Reality Scenes",
"normalizedTitle": "VRTest: An Extensible Framework for Automatic Testing of Virtual Reality Scenes",
"abstract": "Virtual Reality (VR) is an emerging technique that attracts interest from various application domains such as training, education, remote communication, gaming, and navigation. Despite the ever growing number of VR software projects, the quality assurance techniques for VR software has not been well studied. Therefore, the validation of VR software largely rely on pure manual testing. In this paper, we present a novel testing framework called VRTest to automate the testing of scenes in VR software. In particular, VRTest extracts information from a VR scene and controls the user camera to explore the scene and interact with the virtual objects with certain testing strategies. VRTest currently supports two built-in testing strategies: VRMonkey and VRGreed, which use pure random exploration and greedy algorithm to explore interact-able objects in VR scenes. The video of our tool is available on Youtube at https://www.youtube.com/watch?v=TARqTEaa7_Q",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) is an emerging technique that attracts interest from various application domains such as training, education, remote communication, gaming, and navigation. Despite the ever growing number of VR software projects, the quality assurance techniques for VR software has not been well studied. Therefore, the validation of VR software largely rely on pure manual testing. In this paper, we present a novel testing framework called VRTest to automate the testing of scenes in VR software. In particular, VRTest extracts information from a VR scene and controls the user camera to explore the scene and interact with the virtual objects with certain testing strategies. VRTest currently supports two built-in testing strategies: VRMonkey and VRGreed, which use pure random exploration and greedy algorithm to explore interact-able objects in VR scenes. The video of our tool is available on Youtube at https://www.youtube.com/watch?v=TARqTEaa7_Q",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) is an emerging technique that attracts interest from various application domains such as training, education, remote communication, gaming, and navigation. Despite the ever growing number of VR software projects, the quality assurance techniques for VR software has not been well studied. Therefore, the validation of VR software largely rely on pure manual testing. In this paper, we present a novel testing framework called VRTest to automate the testing of scenes in VR software. In particular, VRTest extracts information from a VR scene and controls the user camera to explore the scene and interact with the virtual objects with certain testing strategies. VRTest currently supports two built-in testing strategies: VRMonkey and VRGreed, which use pure random exploration and greedy algorithm to explore interact-able objects in VR scenes. The video of our tool is available on Youtube at https://www.youtube.com/watch?v=TARqTEaa7_Q",
"fno": "959800a232",
"keywords": [
"Greedy Algorithms",
"Program Testing",
"Virtual Reality",
"Automatic Testing",
"Virtual Reality Scenes",
"VR Software Projects",
"Quality Assurance Techniques",
"Pure Manual Testing",
"Testing Framework",
"Particular Extracts Information",
"VR Test Extracts Information",
"VR Scene",
"Virtual Objects",
"Testing Strategies",
"Greedy Algorithms",
"Training",
"Quality Assurance",
"Video On Demand",
"Virtual Reality",
"Cameras",
"Software",
"Software Testing",
"Virtual Reality",
"Scene Exploration"
],
"authors": [
{
"affiliation": "University of Texas at San Antonio,San Antonio,Texas,USA",
"fullName": "Xiaoyin Wang",
"givenName": "Xiaoyin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icse-companion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-05-01T00:00:00",
"pubType": "proceedings",
"pages": "232-236",
"year": "2022",
"issn": "2574-1926",
"isbn": "978-1-6654-9598-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "959800a227",
"articleId": "1EaP7h5guU8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "959800a237",
"articleId": "1EaP5hTVfag",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wsc/2006/0500/0/04117851",
"title": "Structure of an Extensible Augmented Reality Framework for Visualization of Simulated Construction Processes",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2006/04117851/12OmNBOll2O",
"parentPublication": {
"id": "proceedings/wsc/2006/0500/0",
"title": "2006 Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504792",
"title": "What do we care 4? A virtual reality music video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504792/12OmNqAU6zU",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327z031",
"title": "Tutorial 2: Developing Virtual Reality applications with the Visualization Toolkit (VTK)",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327z031/12OmNyRg494",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a311",
"title": "Affective Virtual Reality System (AVRS): Design and Ratings of Affective VR Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a311/1ap5C3hrD6o",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797916",
"title": "DepthMove: Hands-free Interaction in Virtual Reality Using Head Motions in the Depth Dimension",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797916/1cJ0K0zJcv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2019/5434/0/543400a196",
"title": "Towards the Systematic Testing of Virtual Reality Programs",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2019/543400a196/1fHjxgNc8Le",
"parentPublication": {
"id": "proceedings/svr/2019/5434/0",
"title": "2019 21st Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2020/7303/0/730300b129",
"title": "VRvisu++: A Tool for Virtual Reality-Based Visualization of MRI Images",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2020/730300b129/1nkDf7BIHx6",
"parentPublication": {
"id": "proceedings/compsac/2020/7303/0",
"title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a057",
"title": "Understanding VR Software Testing Needs from Stakeholders’ Points of View",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a057/1oZBAurDhv2",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpc/2021/1403/0/140300a381",
"title": "Automatic Extraction of Code Dependency in Virtual Reality Software",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2021/140300a381/1tB7wrdD0u4",
"parentPublication": {
"id": "proceedings/icpc/2021/1403/0/",
"title": "2021 IEEE/ACM 29th International Conference on Program Comprehension (ICPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a460",
"title": "VR-Phore: A Novel Virtual Reality system for Diagnosis of Binocular Vision",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a460/1tnXL2XEOw8",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgROnDNsY",
"doi": "10.1109/VR55154.2023.00051",
"title": "Where to Render: Studying Renderability for IBR of Large-Scale Scenes",
"normalizedTitle": "Where to Render: Studying Renderability for IBR of Large-Scale Scenes",
"abstract": "Image-based rendering (IBR) technique enables presenting real scenes interactively to viewers and hence is a key component for implementing VR telepresence. The quality of IBR results depends on the set of pre-captured views, the rendering algorithm used, and the camera parameters of the novel view to be synthesized. Numerous methods were proposed for optimizing the set of captured images and enhancing the rendering algorithms. However, from which regions IBR methods can synthesize satisfactory results is not yet well studied. In this work, we introduce the concept of renderability, which predicts the quality of IBR results at any given viewpoint and view direction. Consequently, the renderability values evaluated for the 5D camera parameter space form a field, which effectively guides viewpoint/trajectory selection for IBR, especially for challenging large-scale 3D scenes. To demonstrate this capability, we designed 2 VR applications: a path planner that allows users to navigate through sparsely captured scenes with controllable rendering quality and a view selector that provides an overview for a scene from diverse and high quality perspectives. We believe the renderability concept, the proposed evaluation method, and the suggested applications will motivate and facilitate the use of IBR in various interactive settings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image-based rendering (IBR) technique enables presenting real scenes interactively to viewers and hence is a key component for implementing VR telepresence. The quality of IBR results depends on the set of pre-captured views, the rendering algorithm used, and the camera parameters of the novel view to be synthesized. Numerous methods were proposed for optimizing the set of captured images and enhancing the rendering algorithms. However, from which regions IBR methods can synthesize satisfactory results is not yet well studied. In this work, we introduce the concept of renderability, which predicts the quality of IBR results at any given viewpoint and view direction. Consequently, the renderability values evaluated for the 5D camera parameter space form a field, which effectively guides viewpoint/trajectory selection for IBR, especially for challenging large-scale 3D scenes. To demonstrate this capability, we designed 2 VR applications: a path planner that allows users to navigate through sparsely captured scenes with controllable rendering quality and a view selector that provides an overview for a scene from diverse and high quality perspectives. We believe the renderability concept, the proposed evaluation method, and the suggested applications will motivate and facilitate the use of IBR in various interactive settings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image-based rendering (IBR) technique enables presenting real scenes interactively to viewers and hence is a key component for implementing VR telepresence. The quality of IBR results depends on the set of pre-captured views, the rendering algorithm used, and the camera parameters of the novel view to be synthesized. Numerous methods were proposed for optimizing the set of captured images and enhancing the rendering algorithms. However, from which regions IBR methods can synthesize satisfactory results is not yet well studied. In this work, we introduce the concept of renderability, which predicts the quality of IBR results at any given viewpoint and view direction. Consequently, the renderability values evaluated for the 5D camera parameter space form a field, which effectively guides viewpoint/trajectory selection for IBR, especially for challenging large-scale 3D scenes. To demonstrate this capability, we designed 2 VR applications: a path planner that allows users to navigate through sparsely captured scenes with controllable rendering quality and a view selector that provides an overview for a scene from diverse and high quality perspectives. We believe the renderability concept, the proposed evaluation method, and the suggested applications will motivate and facilitate the use of IBR in various interactive settings.",
"fno": "481500a356",
"keywords": [
"Three Dimensional Displays",
"Telepresence",
"Navigation",
"Virtual Reality",
"User Interfaces",
"Aerospace Electronics",
"Rendering Computer Graphics",
"Computer Graphics Techniques Image Based Rendering Scene Rendering",
"Evaluation Methods Renderability View Selection Rendering Path Planning"
],
"authors": [
{
"affiliation": "Shenzhen University",
"fullName": "Zimu Yi",
"givenName": "Zimu",
"surname": "Yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen University",
"fullName": "Ke Xie",
"givenName": "Ke",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen University",
"fullName": "Jiahui Lyu",
"givenName": "Jiahui",
"surname": "Lyu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Guelph",
"fullName": "Minglun Gong",
"givenName": "Minglun",
"surname": "Gong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen University",
"fullName": "Hui Huang",
"givenName": "Hui",
"surname": "Huang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "356-366",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgRJB0Yxy",
"name": "pvr202348150-010108479s1-mm_481500a356.zip",
"size": "21.5 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108479s1-mm_481500a356.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500a347",
"articleId": "1MNgXaINwAg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a367",
"articleId": "1MNgJRvtdtu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926682",
"title": "Densification of Semi-Dense Reconstructions for Novel View Generation of Live Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926682/12OmNBPc8wD",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmm/2004/2084/0/20840002",
"title": "Non-Uniform Sampling for Image-Based Rendering: Convergence of Image, Vision, and Graphics",
"doi": null,
"abstractUrl": "/proceedings-article/mmm/2004/20840002/12OmNBt3qo0",
"parentPublication": {
"id": "proceedings/mmm/2004/2084/0",
"title": "Multi-Media Modeling Conference, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2003/7965/1/7965233",
"title": "A system for active image-based rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2003/7965233/12OmNs0TKUB",
"parentPublication": {
"id": "proceedings/icme/2003/7965/1",
"title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a351",
"title": "Multi-View Inpainting for Image-Based Scene Editing and Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a351/12OmNxEjXRB",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a286",
"title": "Automatic 3D Car Model Alignment for Mixed Image-Based Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a286/12OmNy314dK",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2006/2754/0/27540031",
"title": "A New IBR Approach Based on View Synthesis for Virtual Environment Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2006/27540031/12OmNy49sIA",
"parentPublication": {
"id": "proceedings/icat/2006/2754/0",
"title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwcse/2009/3881/1/3881a288",
"title": "Image-Based Rendering Using Unstructured Image Set",
"doi": null,
"abstractUrl": "/proceedings-article/iwcse/2009/3881a288/12OmNzwHvmn",
"parentPublication": {
"id": "proceedings/iwcse/2009/3881/1",
"title": "Computer Science and Engineering, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3514",
"title": "Fourier PlenOctrees for Dynamic Radiance Field Rendering in Real-time",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3514/1H1m9gTxNYc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090570",
"title": "Presenting COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090570/1jIxhITsCYg",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09385924",
"title": "Instant Panoramic Texture Mapping with Semantic Object Matching for Large-Scale Urban Scene Reproduction",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09385924/1seinXN8TwQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1ap5wvyUHKM",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "1ap5C3hrD6o",
"doi": "10.1109/ICVRV.2017.00072",
"title": "Affective Virtual Reality System (AVRS): Design and Ratings of Affective VR Scenes",
"normalizedTitle": "Affective Virtual Reality System (AVRS): Design and Ratings of Affective VR Scenes",
"abstract": "The present affective stimulation systems have shortages in terms of inefficient emotion evocation and poor immersion. This paper presents the design, instructions and ratings of a novel Affective Virtual Reality System (AVRS), which includes a large set of emotionally-evocative VR scenes and their affective ratings. It can provide more objective and direct affective stimuli of basic emotions (happiness, sadness, fear, relaxation, disgust, and rage) by shielding the environmental interferences. In this study, affective VR scenes have been designed by using various standard affective picture, video and audio materials as references. To assess the three dimensional emotion indices of valence, arousal and dominance, each scene of the system is rated and standardized by Self-Assessment Mainikin. AVRS is the first released VR version affect stimuli materials, which sets a precedent for future interdisciplinary work bridging the gap between VR and cognitive psychology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The present affective stimulation systems have shortages in terms of inefficient emotion evocation and poor immersion. This paper presents the design, instructions and ratings of a novel Affective Virtual Reality System (AVRS), which includes a large set of emotionally-evocative VR scenes and their affective ratings. It can provide more objective and direct affective stimuli of basic emotions (happiness, sadness, fear, relaxation, disgust, and rage) by shielding the environmental interferences. In this study, affective VR scenes have been designed by using various standard affective picture, video and audio materials as references. To assess the three dimensional emotion indices of valence, arousal and dominance, each scene of the system is rated and standardized by Self-Assessment Mainikin. AVRS is the first released VR version affect stimuli materials, which sets a precedent for future interdisciplinary work bridging the gap between VR and cognitive psychology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The present affective stimulation systems have shortages in terms of inefficient emotion evocation and poor immersion. This paper presents the design, instructions and ratings of a novel Affective Virtual Reality System (AVRS), which includes a large set of emotionally-evocative VR scenes and their affective ratings. It can provide more objective and direct affective stimuli of basic emotions (happiness, sadness, fear, relaxation, disgust, and rage) by shielding the environmental interferences. In this study, affective VR scenes have been designed by using various standard affective picture, video and audio materials as references. To assess the three dimensional emotion indices of valence, arousal and dominance, each scene of the system is rated and standardized by Self-Assessment Mainikin. AVRS is the first released VR version affect stimuli materials, which sets a precedent for future interdisciplinary work bridging the gap between VR and cognitive psychology.",
"fno": "263600a311",
"keywords": [
"Behavioural Sciences Computing",
"Emotion Recognition",
"Psychology",
"Virtual Reality",
"AVRS",
"Emotionally Evocative VR Scenes",
"Affective Ratings",
"Direct Affective Stimuli",
"Basic Emotions",
"Affective VR Scenes",
"Audio Materials",
"Dimensional Emotion Indices",
"Affective Stimulation Systems",
"Inefficient Emotion Evocation",
"VR Version",
"Affective Virtual Reality System",
"Virtual Reality",
"Color",
"Feature Extraction",
"Standards",
"Psychology",
"Solid Modeling",
"Databases",
"Immersion VR Scenes",
"Emotional Stimuli"
],
"authors": [
{
"affiliation": null,
"fullName": "Wenzhuo Zhang",
"givenName": "Wenzhuo",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lin Shu",
"givenName": "Lin",
"surname": "Shu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiangmin Xu",
"givenName": "Xiangmin",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dan Liao",
"givenName": "Dan",
"surname": "Liao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "311-314",
"year": "2017",
"issn": "2375-141X",
"isbn": "978-1-5386-2636-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "263600a307",
"articleId": "1ap5B921T1K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "263600a315",
"articleId": "1ap5CNsFs9a",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2017/0563/0/08273640",
"title": "Comparing virtual reality with computer monitors as rating environments for affective dimensions in social interactions",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273640/12OmNwDSdGJ",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890568",
"title": "From crowdsourced rankings to affective ratings",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890568/12OmNwkhTiA",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448286",
"title": "Teacher-Guided Educational VR: Assessment of Live and Prerecorded Teachers Guiding Virtual Field Trips",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448286/13bd1eSlyt6",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669802",
"title": "Cross-subject And Cross-device Wearable EEG Emotion Recognition Using Frontal EEG Under Virtual Reality Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669802/1A9VFDA5CH6",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049710",
"title": "Exploring Plausibility and Presence in Mixed Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049710/1KYoplRZLWM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09507320",
"title": "OctoPocus in VR: Using a Dynamic Guide for 3D Mid-Air Gestures in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09507320/1vNfMheqZ2w",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09531381",
"title": "A Survey on Affective and Cognitive VR",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09531381/1wJl1nWksQo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/06/09646535",
"title": "Affective Virtual Reality: How to Design Artificial Experiences Impacting Human Emotions",
"doi": null,
"abstractUrl": "/magazine/cg/2021/06/09646535/1zdLGFP8O5O",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a160",
"title": "Affective State Classification in Virtual Reality Environments Using Electrocardiogram and Respiration Signals",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a160/1zxLvYxcT2U",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a242",
"title": "Investigating the Affective State of VR HMD User When Watching Videos Displayed in Different Formats",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a242/1zxLyEF5yRW",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxhITsCYg",
"doi": "10.1109/VRW50115.2020.00251",
"title": "Presenting COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"normalizedTitle": "Presenting COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"abstract": "From image-based virtual tours of apartments to digital museum exhibits, transforming photographs of real-world scenes into visually faithful virtual environments has many applications. In this paper, we present our development of a toolkit that places recent advances in the field of image-based rendering (IBR) into the hands of virtual reality (VR) researchers and content creators. We map out how these advances can improve the way we usually render virtual scenes from photographs. We then provide insight into the toolkit’s design as a package for the Unity game engine and share details on core elements of our implementation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "From image-based virtual tours of apartments to digital museum exhibits, transforming photographs of real-world scenes into visually faithful virtual environments has many applications. In this paper, we present our development of a toolkit that places recent advances in the field of image-based rendering (IBR) into the hands of virtual reality (VR) researchers and content creators. We map out how these advances can improve the way we usually render virtual scenes from photographs. We then provide insight into the toolkit’s design as a package for the Unity game engine and share details on core elements of our implementation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "From image-based virtual tours of apartments to digital museum exhibits, transforming photographs of real-world scenes into visually faithful virtual environments has many applications. In this paper, we present our development of a toolkit that places recent advances in the field of image-based rendering (IBR) into the hands of virtual reality (VR) researchers and content creators. We map out how these advances can improve the way we usually render virtual scenes from photographs. We then provide insight into the toolkit’s design as a package for the Unity game engine and share details on core elements of our implementation.",
"fno": "09090570",
"keywords": [
"Rendering Computer Graphics",
"Three Dimensional Displays",
"Virtual Reality",
"Cameras",
"Graphical User Interfaces",
"Visualization",
"Image Color Analysis",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Computing Methodologies",
"Computer Graphics",
"Image Manipulation",
"Image Based Rendering"
],
"authors": [
{
"affiliation": "PSL University,Centre for Robotics, MINES ParisTech,Paris,France",
"fullName": "Grégoire Dupont de Dinechin",
"givenName": "Grégoire Dupont",
"surname": "de Dinechin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "PSL University,Centre for Robotics, MINES ParisTech,Paris,France",
"fullName": "Alexis Paljic",
"givenName": "Alexis",
"surname": "Paljic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "800-801",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090602",
"articleId": "1jIxrupgg92",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090471",
"articleId": "1jIxm9DsWDS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/apscc/2010/9396/0/05708631",
"title": "An Event Based GUI Programming Toolkit for Embedded System",
"doi": null,
"abstractUrl": "/proceedings-article/apscc/2010/05708631/12OmNCdBDJT",
"parentPublication": {
"id": "proceedings/apscc/2010/9396/0",
"title": "2010 Asia-Pacific Services Computing Conference (APSCC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icac/2004/2114/0/01301374",
"title": "A toolkit for policy enablement in autonomic computing",
"doi": null,
"abstractUrl": "/proceedings-article/icac/2004/01301374/12OmNzmLxI6",
"parentPublication": {
"id": "proceedings/icac/2004/2114/0",
"title": "Autonomic Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmpcon/1992/2655/0/00186760",
"title": "An extensible virtual toolkit (XVT) for portable GUI applications",
"doi": null,
"abstractUrl": "/proceedings-article/cmpcon/1992/00186760/12OmNzsJ7jF",
"parentPublication": {
"id": "proceedings/cmpcon/1992/2655/0",
"title": "COMPCON Spring 1992",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a766",
"title": "MR-RIEW: An MR Toolkit for Designing Remote Immersive Experiment Workflows",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a766/1CJemuUb5Be",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a356",
"title": "Where to Render: Studying Renderability for IBR of Large-Scale Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a356/1MNgROnDNsY",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797760",
"title": "Live Coding of a VR Render Engine in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797760/1cJ0OtcoEDe",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090437",
"title": "Demonstrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090437/1jIxnwwnDk4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090615",
"title": "Illustrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090615/1jIxpfQsous",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090528",
"title": "From Real to Virtual: An Image-Based Rendering Toolkit to Help Bring the World Around Us Into Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090528/1jIxxI8I2aI",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a154",
"title": "VRMenuDesigner: A toolkit for automatically generating and modifying VR menus",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a154/1zxLy64RGiA",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxnwwnDk4",
"doi": "10.1109/VRW50115.2020.00273",
"title": "Demonstrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"normalizedTitle": "Demonstrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"abstract": "This demonstration showcases an open-source toolkit we developed in the Unity game engine to enable authors to render real-world photographs in virtual reality (VR) with motion parallax and view-dependent highlights. First, we illustrate the toolset’s capabilities by using it to display interactive, photorealistic renderings of a museum’s mineral collection. Then, we invite audience members to be rendered in VR using our toolkit, thus providing a live, behind-the-scenes look at the process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This demonstration showcases an open-source toolkit we developed in the Unity game engine to enable authors to render real-world photographs in virtual reality (VR) with motion parallax and view-dependent highlights. First, we illustrate the toolset’s capabilities by using it to display interactive, photorealistic renderings of a museum’s mineral collection. Then, we invite audience members to be rendered in VR using our toolkit, thus providing a live, behind-the-scenes look at the process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This demonstration showcases an open-source toolkit we developed in the Unity game engine to enable authors to render real-world photographs in virtual reality (VR) with motion parallax and view-dependent highlights. First, we illustrate the toolset’s capabilities by using it to display interactive, photorealistic renderings of a museum’s mineral collection. Then, we invite audience members to be rendered in VR using our toolkit, thus providing a live, behind-the-scenes look at the process.",
"fno": "09090437",
"keywords": [
"Rendering Computer Graphics",
"Minerals",
"Virtual Reality",
"Three Dimensional Displays",
"Solid Modeling",
"Open Source Software",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Computing Methodologies",
"Computer Graphics",
"Image Manipulation",
"Image Based Rendering"
],
"authors": [
{
"affiliation": "PSL University,Centre for Robotics, MINES ParisTech,Paris,France",
"fullName": "Grégoire Dupont de Dinechin",
"givenName": "Grégoire Dupont de",
"surname": "Dinechin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "PSL University,Centre for Robotics, MINES ParisTech,Paris,France",
"fullName": "Alexis Paljic",
"givenName": "Alexis",
"surname": "Paljic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "844-845",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090514",
"articleId": "1jIxugID0He",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090520",
"articleId": "1jIxrC7PXrO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327z031",
"title": "Tutorial 2: Developing Virtual Reality applications with the Visualization Toolkit (VTK)",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327z031/12OmNyRg494",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a760",
"title": "MeasVRe: Measurement Tools for Unity VR Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a760/1CJcElShVwA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2023/7652/0/10071097",
"title": "Post0-VR: Enabling Universal Realistic Rendering for Modern VR via Exploiting Architectural Similarity and Data Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2023/10071097/1LMbGLjCyqc",
"parentPublication": {
"id": "proceedings/hpca/2023/7652/0",
"title": "2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a356",
"title": "Where to Render: Studying Renderability for IBR of Large-Scale Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a356/1MNgROnDNsY",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797760",
"title": "Live Coding of a VR Render Engine in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797760/1cJ0OtcoEDe",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a175",
"title": "3DUITK: An Opensource Toolkit for Thirty Years of Three-Dimensional Interaction Research",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a175/1gyskJgJSda",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090570",
"title": "Presenting COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090570/1jIxhITsCYg",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090615",
"title": "Illustrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090615/1jIxpfQsous",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090528",
"title": "From Real to Virtual: An Image-Based Rendering Toolkit to Help Bring the World Around Us Into Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090528/1jIxxI8I2aI",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a154",
"title": "VRMenuDesigner: A toolkit for automatically generating and modifying VR menus",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a154/1zxLy64RGiA",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxpfQsous",
"doi": "10.1109/VRW50115.2020.00280",
"title": "Illustrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"normalizedTitle": "Illustrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"abstract": "This video submission illustrates the Core Open Lab on Image-Based Rendering Innovation for Virtual Reality (COLIBRI VR), an opensource toolkit we developed to help authors render photographs of real-world people, objects, and places as responsive 3D assets in VR. We integrated COLIBRI VR as a package for the Unity game engine: in this way, the toolset's methods can easily be accessed from a convenient graphical user interface, and be used in conjunction with the game engine's built-in tools to quickly build interactive virtual reality experiences. Our primary goal is to help users render real- world photographs in VR in a way that provides view-dependent rendering effects and compelling motion parallax. For instance, COLIBRI VR can be used to render captured specular highlights, such as the bright reflections on the facets of a mineral. It also enables providing motion parallax from estimated geometry, e.g., from a depth map associated to a 360° image. We achieve this by implementing efficient image-based rendering methods, which we optimize to run at high framerates for VR. We make the toolkit openly available online, so that it might be used to more easily learn about and apply image-based rendering in the context of virtual reality content creation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This video submission illustrates the Core Open Lab on Image-Based Rendering Innovation for Virtual Reality (COLIBRI VR), an opensource toolkit we developed to help authors render photographs of real-world people, objects, and places as responsive 3D assets in VR. We integrated COLIBRI VR as a package for the Unity game engine: in this way, the toolset's methods can easily be accessed from a convenient graphical user interface, and be used in conjunction with the game engine's built-in tools to quickly build interactive virtual reality experiences. Our primary goal is to help users render real- world photographs in VR in a way that provides view-dependent rendering effects and compelling motion parallax. For instance, COLIBRI VR can be used to render captured specular highlights, such as the bright reflections on the facets of a mineral. It also enables providing motion parallax from estimated geometry, e.g., from a depth map associated to a 360° image. We achieve this by implementing efficient image-based rendering methods, which we optimize to run at high framerates for VR. We make the toolkit openly available online, so that it might be used to more easily learn about and apply image-based rendering in the context of virtual reality content creation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This video submission illustrates the Core Open Lab on Image-Based Rendering Innovation for Virtual Reality (COLIBRI VR), an opensource toolkit we developed to help authors render photographs of real-world people, objects, and places as responsive 3D assets in VR. We integrated COLIBRI VR as a package for the Unity game engine: in this way, the toolset's methods can easily be accessed from a convenient graphical user interface, and be used in conjunction with the game engine's built-in tools to quickly build interactive virtual reality experiences. Our primary goal is to help users render real- world photographs in VR in a way that provides view-dependent rendering effects and compelling motion parallax. For instance, COLIBRI VR can be used to render captured specular highlights, such as the bright reflections on the facets of a mineral. It also enables providing motion parallax from estimated geometry, e.g., from a depth map associated to a 360° image. We achieve this by implementing efficient image-based rendering methods, which we optimize to run at high framerates for VR. We make the toolkit openly available online, so that it might be used to more easily learn about and apply image-based rendering in the context of virtual reality content creation.",
"fno": "09090615",
"keywords": [
"Rendering Computer Graphics",
"Virtual Reality",
"Three Dimensional Displays",
"Games",
"Open Source Software"
],
"authors": [
{
"affiliation": "MINES ParisTech, PSL University,Centre for Robotics,Paris,France",
"fullName": "Grégoire Dupont de Dinechin",
"givenName": "Grégoire Dupont",
"surname": "de Dinechin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MINES ParisTech, PSL University,Centre for Robotics,Paris,France",
"fullName": "Alexis Paljic",
"givenName": "Alexis",
"surname": "Paljic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "856-856",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090574",
"articleId": "1jIxihJ0Qz6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090501",
"articleId": "1jIxiuC3FII",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2005/8929/0/01492801",
"title": "Internal and external scene graphs: a new approach for flexible distributed render engines",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492801/12OmNy5hRfj",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223366",
"title": "BlenderVR: Open-source framework for interactive and immersive VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223366/12OmNy7Qfpl",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2014/8205/0/07026250",
"title": "Render Verse: Hybrid Render Farm for Cluster and Cloud Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2014/07026250/12OmNyuya5N",
"parentPublication": {
"id": "proceedings/ca/2014/8205/0",
"title": "2014 7th Conference on Control and Automation (CA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2023/7652/0/10071097",
"title": "Post0-VR: Enabling Universal Realistic Rendering for Modern VR via Exploiting Architectural Similarity and Data Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2023/10071097/1LMbGLjCyqc",
"parentPublication": {
"id": "proceedings/hpca/2023/7652/0",
"title": "2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a356",
"title": "Where to Render: Studying Renderability for IBR of Large-Scale Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a356/1MNgROnDNsY",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797760",
"title": "Live Coding of a VR Render Engine in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797760/1cJ0OtcoEDe",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090570",
"title": "Presenting COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090570/1jIxhITsCYg",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090437",
"title": "Demonstrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090437/1jIxnwwnDk4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090528",
"title": "From Real to Virtual: An Image-Based Rendering Toolkit to Help Bring the World Around Us Into Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090528/1jIxxI8I2aI",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09487526",
"title": "Real VR",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09487526/1vg3mB6LNJe",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxxI8I2aI",
"doi": "10.1109/VRW50115.2020.00076",
"title": "From Real to Virtual: An Image-Based Rendering Toolkit to Help Bring the World Around Us Into Virtual Reality",
"normalizedTitle": "From Real to Virtual: An Image-Based Rendering Toolkit to Help Bring the World Around Us Into Virtual Reality",
"abstract": "The release of consumer-grade head-mounted displays has helped bring virtual reality (VR) to our homes, cultural sites, and work-places, increasingly making it a part of our everyday lives. In response, many content creators have expressed renewed interest in bringing the people, objects, and places of our daily lives into VR, helping push the boundaries of our ability to transform photographs of everyday real-world scenes into convincing VR assets. In this paper, we present an open-source solution we developed in the Unity game engine as a way to make this image-based approach to virtual reality simple and accessible to all, to encourage content creators of all kinds to capture and render the world around them in VR. We start by presenting the use cases of image-based virtual reality, from which we discuss the motivations that led us to work on our solution. We then provide details on the development of the toolkit, specifically discussing our implementation of several image-based rendering (IBR) methods. Finally, we present the results of a preliminary user study focused on interface usability and rendering quality, and discuss paths for future work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The release of consumer-grade head-mounted displays has helped bring virtual reality (VR) to our homes, cultural sites, and work-places, increasingly making it a part of our everyday lives. In response, many content creators have expressed renewed interest in bringing the people, objects, and places of our daily lives into VR, helping push the boundaries of our ability to transform photographs of everyday real-world scenes into convincing VR assets. In this paper, we present an open-source solution we developed in the Unity game engine as a way to make this image-based approach to virtual reality simple and accessible to all, to encourage content creators of all kinds to capture and render the world around them in VR. We start by presenting the use cases of image-based virtual reality, from which we discuss the motivations that led us to work on our solution. We then provide details on the development of the toolkit, specifically discussing our implementation of several image-based rendering (IBR) methods. Finally, we present the results of a preliminary user study focused on interface usability and rendering quality, and discuss paths for future work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The release of consumer-grade head-mounted displays has helped bring virtual reality (VR) to our homes, cultural sites, and work-places, increasingly making it a part of our everyday lives. In response, many content creators have expressed renewed interest in bringing the people, objects, and places of our daily lives into VR, helping push the boundaries of our ability to transform photographs of everyday real-world scenes into convincing VR assets. In this paper, we present an open-source solution we developed in the Unity game engine as a way to make this image-based approach to virtual reality simple and accessible to all, to encourage content creators of all kinds to capture and render the world around them in VR. We start by presenting the use cases of image-based virtual reality, from which we discuss the motivations that led us to work on our solution. We then provide details on the development of the toolkit, specifically discussing our implementation of several image-based rendering (IBR) methods. Finally, we present the results of a preliminary user study focused on interface usability and rendering quality, and discuss paths for future work.",
"fno": "09090528",
"keywords": [
"Rendering Computer Graphics",
"Tools",
"Cameras",
"Virtual Reality",
"Games",
"Three Dimensional Displays",
"Engines",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Computing Methodologies",
"Computer Graphics",
"Image Manipulation",
"Image Based Rendering"
],
"authors": [
{
"affiliation": "PSL University,Centre for Robotics, MINES ParisTech,Paris,France",
"fullName": "Grégoire Dupont de Dinechin",
"givenName": "Grégoire Dupont de",
"surname": "Dinechin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "PSL University,Centre for Robotics, MINES ParisTech,Paris,France",
"fullName": "Alexis Paljic",
"givenName": "Alexis",
"surname": "Paljic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "348-353",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090445",
"articleId": "1jIxz87dXEs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090593",
"articleId": "1jIxnnX0xPi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmu/2017/31/0/08330112",
"title": "Clash tanks: An investigation of virtual and augmented reality gaming experience",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2017/08330112/12OmNB8TU7d",
"parentPublication": {
"id": "proceedings/icmu/2017/31/0",
"title": "2017 Tenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esem/2017/4039/0/4039a474",
"title": "An Empirical Study of Open Source Virtual Reality Software Projects",
"doi": null,
"abstractUrl": "/proceedings-article/esem/2017/4039a474/12OmNBghtsE",
"parentPublication": {
"id": "proceedings/esem/2017/4039/0",
"title": "2017 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2015/1725/0/07151692",
"title": "Virtual reality enabled scientific visualization workflow",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2015/07151692/12OmNrJAeiv",
"parentPublication": {
"id": "proceedings/wevr/2015/1725/0",
"title": "2015 IEEE 1st Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2017/3091/0/3091a020",
"title": "Visual Representation of Gesture Interaction Feedback in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a020/12OmNx5Yviz",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223366",
"title": "BlenderVR: Open-source framework for interactive and immersive VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223366/12OmNy7Qfpl",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imet/2022/7016/0/09929500",
"title": "Interactive Historical Documentary in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/imet/2022/09929500/1HYuTheBVYY",
"parentPublication": {
"id": "proceedings/imet/2022/7016/0",
"title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a397",
"title": "Vegetation Rendering Optimization for Virtual Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a397/1ap5wyffDYA",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797989",
"title": "Redirected Jumping: Imperceptibly Manipulating Jump Motions in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797989/1cJ15zHucrC",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090570",
"title": "Presenting COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090570/1jIxhITsCYg",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090615",
"title": "Illustrating COLIBRI VR, an Open-Source Toolkit to Render Real-World Scenes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090615/1jIxpfQsous",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJffY1QgeI",
"doi": "10.1109/VRW55335.2022.00216",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"normalizedTitle": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"abstract": "Embodiment in augmented reality (AR) is applicable to various fields such as exercise and education. However, full-body embodiment in AR is still challenging to implement due to technical problems such as low body tracking accuracy. Therefore, the study on the impact of an avatar in AR on user performance is limited. We implemented an AR embodiment system and investigated its impact on user physical performance. The system allows users to see their avatar instead of their real body from a third-person perspective. The results show that a muscular avatar improves user physical performance during and after controlling the avatar.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Embodiment in augmented reality (AR) is applicable to various fields such as exercise and education. However, full-body embodiment in AR is still challenging to implement due to technical problems such as low body tracking accuracy. Therefore, the study on the impact of an avatar in AR on user performance is limited. We implemented an AR embodiment system and investigated its impact on user physical performance. The system allows users to see their avatar instead of their real body from a third-person perspective. The results show that a muscular avatar improves user physical performance during and after controlling the avatar.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Embodiment in augmented reality (AR) is applicable to various fields such as exercise and education. However, full-body embodiment in AR is still challenging to implement due to technical problems such as low body tracking accuracy. Therefore, the study on the impact of an avatar in AR on user performance is limited. We implemented an AR embodiment system and investigated its impact on user physical performance. The system allows users to see their avatar instead of their real body from a third-person perspective. The results show that a muscular avatar improves user physical performance during and after controlling the avatar.",
"fno": "840200a730",
"keywords": [
"Augmented Reality",
"Avatars",
"Person Perspective Avatar Embodiment",
"Augmented Reality",
"Proteus Effect",
"Education",
"Full Body Embodiment",
"Low Body Tracking Accuracy",
"User Performance",
"AR Embodiment System",
"User Physical Performance",
"Third Person Perspective",
"Muscular Avatar",
"Human Computer Interaction",
"Three Dimensional Displays",
"Avatars",
"Conferences",
"Education",
"Augmented Reality",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Empirical Studies In HCI"
],
"authors": [
{
"affiliation": "NAIST,Japan",
"fullName": "Riku Otono",
"givenName": "Riku",
"surname": "Otono",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NAIST,Japan",
"fullName": "Naoya Isoyama",
"givenName": "Naoya",
"surname": "Isoyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NAIST,Japan",
"fullName": "Hideaki Uchiyama",
"givenName": "Hideaki",
"surname": "Uchiyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NAIST,Japan",
"fullName": "Kiyoshi Kiyokawa",
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "730-731",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJffa0bnVK",
"name": "pvrw202284020-09757438s1-mm_840200a730.zip",
"size": "79.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757438s1-mm_840200a730.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a728",
"articleId": "1CJe5ADxJle",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a732",
"articleId": "1CJcCMpD8xa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ct/1997/8084/0/80840012",
"title": "The Cyborg's Dilemma: Embodiment in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ct/1997/80840012/12OmNx38vVh",
"parentPublication": {
"id": "proceedings/ct/1997/8084/0",
"title": "Cognitive Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a350",
"title": "Exploring Presence, Avatar Embodiment, and Body Perception with a Holographic Augmented Reality Mirror",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a350/1CJcn3q3J5K",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a503",
"title": "Studying “Avatar Transitions” in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a260",
"title": "The Effects of Avatar and Environment Design on Embodiment, Presence, Activation, and Task Load in a Virtual Reality Exercise Application",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a260/1JrRf0Dbcac",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049676",
"title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a083",
"title": "I'm Transforming! Effects of Visual Transitions to Change of Avatar on the Sense of Embodiment in AR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a083/1MNgRmjl6Zq",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090668",
"title": "Minimal Embodiment: Effects of a Portable Version of a Virtual Disembodiment Experience on Fear of Death",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090668/1jIxwUJWcGA",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a054",
"title": "The Effects of Body Tracking Fidelity on Embodiment of an Inverse-Kinematic Avatar for Male Participants",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a054/1pyswgi4b7y",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09495125",
"title": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09495125/1vyju4jl6AE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pyswgi4b7y",
"doi": "10.1109/ISMAR50242.2020.00025",
"title": "The Effects of Body Tracking Fidelity on Embodiment of an Inverse-Kinematic Avatar for Male Participants",
"normalizedTitle": "The Effects of Body Tracking Fidelity on Embodiment of an Inverse-Kinematic Avatar for Male Participants",
"abstract": "Many research studies have investigated avatar embodiment and its effects on self-location, agency, and body ownership. Researchers have also investigated the effects of various external stimuli and avatar appearances during embodiment. However, the effects of body tracking fidelity while embodying an inverse-kinematic avatar are relatively unexplored. In this paper, we present two studies using a set of six trackers that investigate four levels of body tracking fidelity during avatar embodiment for male participants only: Complete (head, hands, feet, and pelvis trackers), Head-and-Extremities (head, hands, and feet trackers), Head-and-Hands (head and hands trackers), and No-Avatar (head and hands trackers; only controllers visible). Our results indicate that tracking the head, hands, and feet significantly increases the sense of embodiment and the sense of spatial presence when embodying an inverse-kinematic avatar for male participants.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many research studies have investigated avatar embodiment and its effects on self-location, agency, and body ownership. Researchers have also investigated the effects of various external stimuli and avatar appearances during embodiment. However, the effects of body tracking fidelity while embodying an inverse-kinematic avatar are relatively unexplored. In this paper, we present two studies using a set of six trackers that investigate four levels of body tracking fidelity during avatar embodiment for male participants only: Complete (head, hands, feet, and pelvis trackers), Head-and-Extremities (head, hands, and feet trackers), Head-and-Hands (head and hands trackers), and No-Avatar (head and hands trackers; only controllers visible). Our results indicate that tracking the head, hands, and feet significantly increases the sense of embodiment and the sense of spatial presence when embodying an inverse-kinematic avatar for male participants.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many research studies have investigated avatar embodiment and its effects on self-location, agency, and body ownership. Researchers have also investigated the effects of various external stimuli and avatar appearances during embodiment. However, the effects of body tracking fidelity while embodying an inverse-kinematic avatar are relatively unexplored. In this paper, we present two studies using a set of six trackers that investigate four levels of body tracking fidelity during avatar embodiment for male participants only: Complete (head, hands, feet, and pelvis trackers), Head-and-Extremities (head, hands, and feet trackers), Head-and-Hands (head and hands trackers), and No-Avatar (head and hands trackers; only controllers visible). Our results indicate that tracking the head, hands, and feet significantly increases the sense of embodiment and the sense of spatial presence when embodying an inverse-kinematic avatar for male participants.",
"fno": "850800a054",
"keywords": [
"Avatars",
"Gender Issues",
"Human Computer Interaction",
"Body Tracking Fidelity",
"Male Participants",
"Avatar Embodiment",
"Body Ownership",
"Feet Trackers",
"Hands Trackers",
"Inverse Kinematic Avatar",
"Head And Extremities",
"Complete",
"Head And Hands",
"No Avatar",
"Virtual Reality",
"Human Computer Interaction",
"Human Centered Computing",
"Avatars",
"Design Methodology",
"External Stimuli",
"Pelvis",
"Augmented Reality",
"Embodiment",
"Virtual Reality",
"Body Tracking Fidelity",
"Avatars Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Virtual Reality Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 HCI Design And Evaluation Methods X 2014 User Studies",
"Embodiment Virtual Reality Body Tracking Fidelity Avatars Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"HCI Design And Evaluation Methods",
"User Studies"
],
"authors": [
{
"affiliation": "University of Texas at Dallas,School of Arts, Technology, and Emerging Communication,Richardson,TX,USA",
"fullName": "James Coleman Eubanks",
"givenName": "James Coleman",
"surname": "Eubanks",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida,Department of Computer Science,Orlando,FL,USA",
"fullName": "Alec G. Moore",
"givenName": "Alec G.",
"surname": "Moore",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at Dallas,School of Arts, Technology, and Emerging Communication,Richardson,TX,USA",
"fullName": "Paul A. Fishwick",
"givenName": "Paul A.",
"surname": "Fishwick",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida,Department of Computer Science,Orlando,FL,USA",
"fullName": "Ryan P. McMahan",
"givenName": "Ryan P.",
"surname": "McMahan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "54-63",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a045",
"articleId": "1pysxBReola",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a064",
"articleId": "1pysxPMqyTm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892278",
"title": "Bodiless embodiment: A descriptive survey of avatar bodily coherence in first-wave consumer VR applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892278/12OmNvnwVj4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040591",
"title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714123",
"title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a503",
"title": "Studying “Avatar Transitions” in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a260",
"title": "The Effects of Avatar and Environment Design on Embodiment, Presence, Activation, and Task Load in a Virtual Reality Exercise Application",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a260/1JrRf0Dbcac",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998371",
"title": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998371/1hrXiia6v9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09495125",
"title": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09495125/1vyju4jl6AE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNySXF2Z",
"title": "Signal Acquisition and Processing, International Conference on",
"acronym": "icsap",
"groupId": "1002798",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAo45Me",
"doi": "10.1109/ICSAP.2009.44",
"title": "3D Reconstruction Using Level-Based Approach in Stereopsis",
"normalizedTitle": "3D Reconstruction Using Level-Based Approach in Stereopsis",
"abstract": "We present preliminary results of our work on an alternative approach to address correspondence in stereopsis. Accurate correspondence between two images is required to determine depth and hence, the 3D model, out of two 2D images. To achieve this, we studied the relationship among disparity, depth, camera features and data gathered from output images. We propose a level-based approach to enhance the correspondence accuracy. The results show that the algorithm gives accurate results compared to the measurements in the camera set-up.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present preliminary results of our work on an alternative approach to address correspondence in stereopsis. Accurate correspondence between two images is required to determine depth and hence, the 3D model, out of two 2D images. To achieve this, we studied the relationship among disparity, depth, camera features and data gathered from output images. We propose a level-based approach to enhance the correspondence accuracy. The results show that the algorithm gives accurate results compared to the measurements in the camera set-up.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present preliminary results of our work on an alternative approach to address correspondence in stereopsis. Accurate correspondence between two images is required to determine depth and hence, the 3D model, out of two 2D images. To achieve this, we studied the relationship among disparity, depth, camera features and data gathered from output images. We propose a level-based approach to enhance the correspondence accuracy. The results show that the algorithm gives accurate results compared to the measurements in the camera set-up.",
"fno": "05163842",
"keywords": [
"Cameras",
"Image Reconstruction",
"Stereo Image Processing",
"3 D Reconstruction",
"Level Based Approach",
"Stereopsis",
"Output Images",
"Level Based Approach",
"Correspondence Accuracy",
"Camera Set Up",
"Cameras",
"Image Edge Detection",
"Image Reconstruction",
"Computer Vision",
"Stereo Vision",
"Layout",
"Signal Processing",
"Visual Perception",
"Retina",
"Object Recognition",
"Correspondence",
"Computer Vision",
"Stereopsis"
],
"authors": [
{
"affiliation": null,
"fullName": "Sayed Ali Kasaei Zadeh",
"givenName": "Sayed Ali Kasaei",
"surname": "Zadeh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Abas Md Said",
"givenName": "Abas Md",
"surname": "Said",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icsap",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-04-01T00:00:00",
"pubType": "proceedings",
"pages": "138-142",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3594-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05163841",
"articleId": "12OmNrGsDrr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05163843",
"articleId": "12OmNynsbBK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032c326",
"title": "SurfaceNet: An End-to-End 3D Neural Network for Multiview Stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c326/12OmNB8TUfZ",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295102",
"title": "A Bayesian framework for enhanced geometric reconstruction of complex objects by Helmholtz stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295102/12OmNC8dghe",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139664",
"title": "Stereopsis and image registration from extended edge features in the absence of camera pose information",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139664/12OmNvrvj7V",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2005/9385/0/01577308",
"title": "Shape reconstruction from two color images using photometric stereo combined with segmentation and stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2005/01577308/12OmNwe2IyN",
"parentPublication": {
"id": "proceedings/avss/2005/9385/0",
"title": "IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/058P1B05",
"title": "Leveraging stereopsis for saliency analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/058P1B05/12OmNy50g5B",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028320",
"title": "Collinear trinocular stereo using two-level dynamic programming",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028320/12OmNyjtNGz",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341001",
"title": "Stereopsis for verging systems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341001/12OmNyoAA5e",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2003/1950/2/195021411",
"title": "Binocular Helmholtz Stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2003/195021411/12OmNywxlJn",
"parentPublication": {
"id": "proceedings/iccv/2003/1950/2",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a637",
"title": "A High-Performance Hardware Architecture for a Frameless Stereo Vision Algorithm Implemented on a FPGA Platform",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a637/12OmNz6iOqV",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714042",
"title": "Stereopsis Only: Validation of a Monocular Depth Cues Reduced Gamified Virtual Reality with Reaction Time Measurement",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714042/1B0XZrABGk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy50g5B",
"doi": "10.1109/CVPR.2012.6247708",
"title": "Leveraging stereopsis for saliency analysis",
"normalizedTitle": "Leveraging stereopsis for saliency analysis",
"abstract": "Stereopsis provides an additional depth cue and plays an important role in the human vision system. This paper explores stereopsis for saliency analysis and presents two approaches to stereo saliency detection from stereoscopic images. The first approach computes stereo saliency based on the global disparity contrast in the input image. The second approach leverages domain knowledge in stereoscopic photography. A good stereoscopic image takes care of its disparity distribution to avoid 3D fatigue. Particularly, salient content tends to be positioned in the stereoscopic comfort zone to alleviate the vergence-accommodation conflict. Accordingly, our method computes stereo saliency of an image region based on the distance between its perceived location and the comfort zone. Moreover, we consider objects popping out from the screen salient as these objects tend to catch a viewer's attention. We build a stereo saliency analysis benchmark dataset that contains 1000 stereoscopic images with salient object masks. Our experiments on this dataset show that stereo saliency provides a useful complement to existing visual saliency analysis and our method can successfully detect salient content from images that are difficult for monocular saliency analysis methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Stereopsis provides an additional depth cue and plays an important role in the human vision system. This paper explores stereopsis for saliency analysis and presents two approaches to stereo saliency detection from stereoscopic images. The first approach computes stereo saliency based on the global disparity contrast in the input image. The second approach leverages domain knowledge in stereoscopic photography. A good stereoscopic image takes care of its disparity distribution to avoid 3D fatigue. Particularly, salient content tends to be positioned in the stereoscopic comfort zone to alleviate the vergence-accommodation conflict. Accordingly, our method computes stereo saliency of an image region based on the distance between its perceived location and the comfort zone. Moreover, we consider objects popping out from the screen salient as these objects tend to catch a viewer's attention. We build a stereo saliency analysis benchmark dataset that contains 1000 stereoscopic images with salient object masks. Our experiments on this dataset show that stereo saliency provides a useful complement to existing visual saliency analysis and our method can successfully detect salient content from images that are difficult for monocular saliency analysis methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Stereopsis provides an additional depth cue and plays an important role in the human vision system. This paper explores stereopsis for saliency analysis and presents two approaches to stereo saliency detection from stereoscopic images. The first approach computes stereo saliency based on the global disparity contrast in the input image. The second approach leverages domain knowledge in stereoscopic photography. A good stereoscopic image takes care of its disparity distribution to avoid 3D fatigue. Particularly, salient content tends to be positioned in the stereoscopic comfort zone to alleviate the vergence-accommodation conflict. Accordingly, our method computes stereo saliency of an image region based on the distance between its perceived location and the comfort zone. Moreover, we consider objects popping out from the screen salient as these objects tend to catch a viewer's attention. We build a stereo saliency analysis benchmark dataset that contains 1000 stereoscopic images with salient object masks. Our experiments on this dataset show that stereo saliency provides a useful complement to existing visual saliency analysis and our method can successfully detect salient content from images that are difficult for monocular saliency analysis methods.",
"fno": "058P1B05",
"keywords": [
"Stereo Image Processing",
"Computer Vision",
"Photography",
"Monocular Saliency Analysis",
"Stereopsis Leveraging",
"Human Vision System",
"Stereo Saliency Detection",
"Stereoscopic Images",
"Global Disparity Contrast",
"Domain Knowledge",
"Stereoscopic Photography",
"Disparity Distribution",
"Vergence Accommodation Conflict",
"Image Region",
"Perceived Location",
"Comfort Zone",
"Objects Popping",
"Viewer Attention",
"Salient Object Masks",
"Visual Saliency Analysis",
"Stereo Image Processing",
"Visualization",
"Cameras",
"Photography",
"Benchmark Testing",
"Image Color Analysis"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Portland State Univ., Portland, OR, USA",
"fullName": "Feng Liu",
"givenName": null,
"surname": "Feng Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Comput. Sci. & Technol., Shandong Univ., Jinan, China",
"fullName": "Xueqing Li",
"givenName": null,
"surname": "Xueqing Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Portland State Univ., Portland, OR, USA",
"fullName": "Yujie Geng",
"givenName": null,
"surname": "Yujie Geng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Portland State Univ., Portland, OR, USA",
"fullName": "Yuzhen Niu",
"givenName": null,
"surname": "Yuzhen Niu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "454-461",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "057P1B04",
"articleId": "12OmNwp74GW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "059P1B06",
"articleId": "12OmNB1wkJP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sitis/2011/4635/0/4635a298",
"title": "A Novel Visual Saliency Model for Surveillance Video Compression",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2011/4635a298/12OmNCbU3bW",
"parentPublication": {
"id": "proceedings/sitis/2011/4635/0",
"title": "2011 Seventh International Conference on Signal Image Technology & Internet-Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890709",
"title": "Learning visual saliency for stereoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890709/12OmNqIhFMD",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a309",
"title": "Edge Saliency Map Detection with Texture Suppression",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a309/12OmNqJZgCI",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341001",
"title": "Stereopsis for verging systems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341001/12OmNyoAA5e",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2011/4517/0/4517a240",
"title": "Visual Attention Model with Cross-Layer Saliency Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2011/4517a240/12OmNyuPL6f",
"parentPublication": {
"id": "proceedings/iih-msp/2011/4517/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/11/ttp2011112147",
"title": "A Component-Wise Analysis of Constructible Match Cost Functions for Global Stereopsis",
"doi": null,
"abstractUrl": "/journal/tp/2011/11/ttp2011112147/13rRUwghd69",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07544591",
"title": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07544591/13rRUyfbwqP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486571",
"title": "Multi-Path Feature Fusion Network for Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486571/14jQfPGqOcz",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300j741",
"title": "Learning to Explore Intrinsic Saliency for Stereoscopic Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300j741/1gyrLoWsF1u",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090432",
"title": "Perceptual Distortions Between Windows and Screens: Stereopsis Predicts Motion Parallax",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090432/1jIxqgOQSDS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwp74rq",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyoAA5e",
"doi": "10.1109/CVPR.1993.341001",
"title": "Stereopsis for verging systems",
"normalizedTitle": "Stereopsis for verging systems",
"abstract": "The implications of vergence control and active vision for stereopsis in robots and humans are investigated. It is argued that the geometry of verging systems places strong constraints on the ecological role of stereopsis. In particular, stereopsis is poorly suited to building whole-field depth maps but well suited to gathering information about specific targets of interest in the scene in a coordinate frame referenced to the fixation point. A simple, fast stereo system targeted at the latter goal is presented.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The implications of vergence control and active vision for stereopsis in robots and humans are investigated. It is argued that the geometry of verging systems places strong constraints on the ecological role of stereopsis. In particular, stereopsis is poorly suited to building whole-field depth maps but well suited to gathering information about specific targets of interest in the scene in a coordinate frame referenced to the fixation point. A simple, fast stereo system targeted at the latter goal is presented.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The implications of vergence control and active vision for stereopsis in robots and humans are investigated. It is argued that the geometry of verging systems places strong constraints on the ecological role of stereopsis. In particular, stereopsis is poorly suited to building whole-field depth maps but well suited to gathering information about specific targets of interest in the scene in a coordinate frame referenced to the fixation point. A simple, fast stereo system targeted at the latter goal is presented.",
"fno": "00341001",
"keywords": [
"Visual Perception",
"Stereo Image Processing",
"Ecology",
"Robot Vision",
"Human Vision",
"Verging Systems",
"Vergence Control",
"Active Vision",
"Stereopsis",
"Coordinate Frame",
"Fixation Point",
"Humans",
"Robot Vision Systems",
"Robot Kinematics",
"Cameras",
"Stereo Vision",
"Eyes",
"Layout",
"Control Systems",
"Computer Science",
"Computer Vision"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Virginia Univ., Charlottesville, VA, USA",
"fullName": "T.J. Olson",
"givenName": "T.J.",
"surname": "Olson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-01-01T00:00:00",
"pubType": "proceedings",
"pages": "55-60",
"year": "1993",
"issn": "1063-6919",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00341000",
"articleId": "12OmNAnMuMB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00341002",
"articleId": "12OmNyKJiyP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsap/2009/3594/0/05163842",
"title": "3D Reconstruction Using Level-Based Approach in Stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icsap/2009/05163842/12OmNAo45Me",
"parentPublication": {
"id": "proceedings/icsap/2009/3594/0",
"title": "Signal Acquisition and Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1993/3870/0/00378186",
"title": "Dynamic fixation [active vision]",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378186/12OmNBTawxC",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1993/3870/0/00378176",
"title": "Relative depth from vergence micromovements",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378176/12OmNrkBwyx",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139664",
"title": "Stereopsis and image registration from extended edge features in the absence of camera pose information",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139664/12OmNvrvj7V",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/058P1B05",
"title": "Leveraging stereopsis for saliency analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/058P1B05/12OmNy50g5B",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118a406",
"title": "Large Scale Multi-view Stereopsis Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a406/12OmNyaXPMa",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/1/71080249",
"title": "Planar projection stereopsis method for road extraction",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71080249/12OmNzcxZiE",
"parentPublication": {
"id": "proceedings/iros/1995/7108/1",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/11/ttp2011112147",
"title": "A Component-Wise Analysis of Constructible Match Cost Functions for Global Stereopsis",
"doi": null,
"abstractUrl": "/journal/tp/2011/11/ttp2011112147/13rRUwghd69",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/08/ttp2010081362",
"title": "Accurate, Dense, and Robust Multiview Stereopsis",
"doi": null,
"abstractUrl": "/journal/tp/2010/08/ttp2010081362/13rRUxcKzWq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714042",
"title": "Stereopsis Only: Validation of a Monocular Depth Cues Reduced Gamified Virtual Reality with Reaction Time Measurement",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714042/1B0XZrABGk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvRU0lg",
"title": "Computer Vision, IEEE International Conference on",
"acronym": "iccv",
"groupId": "1000149",
"volume": "2",
"displayVolume": "3",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNywxlJn",
"doi": "10.1109/ICCV.2003.1238655",
"title": "Binocular Helmholtz Stereopsis",
"normalizedTitle": "Binocular Helmholtz Stereopsis",
"abstract": "Helmholtz stereopsis has been introduced recently as a surface reconstruction technique that does not assume a model of surface reflectance. In the reported formulation, correspondence was established using a rank constraint, necessitating at least three viewpoints and three pairs of images. Here, it is revealed that the fundamental Helmholtz stereopsis constraint defines a nonlinear partial differential equation, which can be solved using only two images. It is shown that, unlike conventional stereo, binocular Helmholtz stereopsis is able to establish correspondence (and thereby recover surface depth) for objects having an arbitrary and unknown BRDF and in textureless regions (i.e., regions of constant or slowly varying BRDF). An implementation and experimental results validate the method for specular surfaces with and without texture.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Helmholtz stereopsis has been introduced recently as a surface reconstruction technique that does not assume a model of surface reflectance. In the reported formulation, correspondence was established using a rank constraint, necessitating at least three viewpoints and three pairs of images. Here, it is revealed that the fundamental Helmholtz stereopsis constraint defines a nonlinear partial differential equation, which can be solved using only two images. It is shown that, unlike conventional stereo, binocular Helmholtz stereopsis is able to establish correspondence (and thereby recover surface depth) for objects having an arbitrary and unknown BRDF and in textureless regions (i.e., regions of constant or slowly varying BRDF). An implementation and experimental results validate the method for specular surfaces with and without texture.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Helmholtz stereopsis has been introduced recently as a surface reconstruction technique that does not assume a model of surface reflectance. In the reported formulation, correspondence was established using a rank constraint, necessitating at least three viewpoints and three pairs of images. Here, it is revealed that the fundamental Helmholtz stereopsis constraint defines a nonlinear partial differential equation, which can be solved using only two images. It is shown that, unlike conventional stereo, binocular Helmholtz stereopsis is able to establish correspondence (and thereby recover surface depth) for objects having an arbitrary and unknown BRDF and in textureless regions (i.e., regions of constant or slowly varying BRDF). An implementation and experimental results validate the method for specular surfaces with and without texture.",
"fno": "195021411",
"keywords": [],
"authors": [
{
"affiliation": "Yale University",
"fullName": "Todd E. Zickler",
"givenName": "Todd E.",
"surname": "Zickler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at San Diego",
"fullName": "Jeffrey Ho",
"givenName": "Jeffrey",
"surname": "Ho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at San Diego",
"fullName": "David J. Kriegman",
"givenName": "David J.",
"surname": "Kriegman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois at Urbana-Champaign",
"fullName": "Jean Ponce",
"givenName": "Jean",
"surname": "Ponce",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Columbia University, New York",
"fullName": "Peter N. Belhumeur",
"givenName": "Peter N.",
"surname": "Belhumeur",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1411",
"year": "2003",
"issn": null,
"isbn": "0-7695-1950-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "195021403",
"articleId": "12OmNAm4TI6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "195021418",
"articleId": "12OmNCwlaky",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/visapp/2014/8133/3/07295102",
"title": "A Bayesian framework for enhanced geometric reconstruction of complex objects by Helmholtz stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295102/12OmNC8dghe",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2003/1900/1/190010548",
"title": "Toward a Stratification of Helmholtz Stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2003/190010548/12OmNweTvMC",
"parentPublication": {
"id": "proceedings/cvpr/2003/1900/1",
"title": "2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2007/2996/0/29960213",
"title": "An Opto-Mechanical Apparatus for Binocular Helmholtz Stereopsis in Static and Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2007/29960213/12OmNwwd2XD",
"parentPublication": {
"id": "proceedings/sibgrapi/2007/2996/0",
"title": "XX Brazilian Symposium on Computer Graphics and Image Processing (SIBGRAPI 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a251",
"title": "Colour Helmholtz Stereopsis for Reconstruction of Complex Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a251/12OmNx3q73E",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413667",
"title": "Global priors for binocular stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413667/12OmNxxdZDH",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2004/2223/0/22230010",
"title": "Helmholtz Stereopsis on Rough and Strongly Textured Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2004/22230010/12OmNzahccN",
"parentPublication": {
"id": "proceedings/3dpvt/2004/2223/0",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315028",
"title": "Radiometric calibration of a Helmholtz stereo rig",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315028/12OmNzdoMXW",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/09/08048554",
"title": "Bayesian Helmholtz Stereopsis with Integrability Prior",
"doi": null,
"abstractUrl": "/journal/tp/2018/09/08048554/13rRUxly8Um",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f017",
"title": "Polarimetric Helmholtz Stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f017/1BmGZZxT1RK",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0O034AQuI",
"doi": "10.1109/CVPR52688.2022.00198",
"title": "ChiTransformer: Towards Reliable Stereo from Cues",
"normalizedTitle": "ChiTransformer: Towards Reliable Stereo from Cues",
"abstract": "Current stereo matching techniques are challenged by restricted searching space, occluded regions and sheer size. While single image depth estimation is spared from these challenges and can achieve satisfactory results with the extracted monocular cues, the lack of stereoscopic relationship renders the monocular prediction less reliable on its own especially in highly dynamic or cluttered environments. To address these issues in both scenarios, we present an optic-chiasm-inspired self-supervised binocular depth estimation method, wherein vision transformer (ViT) with a gated positional cross-attention (GPCA) layer is designed to enable feature-sensitive pattern retrieval between views, while retaining the extensive context information aggregated through self-attentions. Monocular cues from a single view are thereafter conditionally rectified by a blending layer with the retrieved pattern pairs. This crossover design is biologically analogous to the optic-chasma structure in human visual system and hence the name, Chi-Transformer. Our experiments show that this architecture yields substantial improvements over state-of-the-art self-supervised stereo approaches by 11%, and can be used on both rectilinear and non-rectilinear (e.g., fisheye) images.<sup>1</sup><sup>1</sup>https://github.com/ISL-CV/ChiTransformer.git",
"abstracts": [
{
"abstractType": "Regular",
"content": "Current stereo matching techniques are challenged by restricted searching space, occluded regions and sheer size. While single image depth estimation is spared from these challenges and can achieve satisfactory results with the extracted monocular cues, the lack of stereoscopic relationship renders the monocular prediction less reliable on its own especially in highly dynamic or cluttered environments. To address these issues in both scenarios, we present an optic-chiasm-inspired self-supervised binocular depth estimation method, wherein vision transformer (ViT) with a gated positional cross-attention (GPCA) layer is designed to enable feature-sensitive pattern retrieval between views, while retaining the extensive context information aggregated through self-attentions. Monocular cues from a single view are thereafter conditionally rectified by a blending layer with the retrieved pattern pairs. This crossover design is biologically analogous to the optic-chasma structure in human visual system and hence the name, Chi-Transformer. Our experiments show that this architecture yields substantial improvements over state-of-the-art self-supervised stereo approaches by 11%, and can be used on both rectilinear and non-rectilinear (e.g., fisheye) images.<sup>1</sup><sup>1</sup>https://github.com/ISL-CV/ChiTransformer.git",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Current stereo matching techniques are challenged by restricted searching space, occluded regions and sheer size. While single image depth estimation is spared from these challenges and can achieve satisfactory results with the extracted monocular cues, the lack of stereoscopic relationship renders the monocular prediction less reliable on its own especially in highly dynamic or cluttered environments. To address these issues in both scenarios, we present an optic-chiasm-inspired self-supervised binocular depth estimation method, wherein vision transformer (ViT) with a gated positional cross-attention (GPCA) layer is designed to enable feature-sensitive pattern retrieval between views, while retaining the extensive context information aggregated through self-attentions. Monocular cues from a single view are thereafter conditionally rectified by a blending layer with the retrieved pattern pairs. This crossover design is biologically analogous to the optic-chasma structure in human visual system and hence the name, Chi-Transformer. Our experiments show that this architecture yields substantial improvements over state-of-the-art self-supervised stereo approaches by 11%, and can be used on both rectilinear and non-rectilinear (e.g., fisheye) images.11https://github.com/ISL-CV/ChiTransformer.git",
"fno": "694600b929",
"keywords": [
"Computer Vision",
"Image Matching",
"Stereo Image Processing",
"Self Attentions",
"Blending Layer",
"Retrieved Pattern Pairs",
"Crossover Design",
"Optic Chasma Structure",
"Human Visual System",
"Chi Transformer",
"Architecture Yields Substantial Improvements",
"State Of The Art Self Supervised Stereo Approaches",
"Towards Reliable Stereo",
"Current Stereo Matching Techniques",
"Restricted Searching Space",
"Occluded Regions",
"Sheer Size",
"Single Image Depth Estimation",
"Extracted Monocular Cues",
"Stereoscopic Relationship",
"Monocular Prediction",
"Highly Dynamic Environments",
"Cluttered Environments",
"Optic Chiasm Inspired Self",
"Binocular Depth Estimation Method",
"Vision Transformer",
"Vi T",
"Gated Positional Cross Attention Layer",
"GPCA",
"Feature Sensitive Pattern Retrieval",
"Extensive Context Information",
"Optical Polarization",
"Biomedical Optical Imaging",
"Optical Design",
"Stereo Image Processing",
"Estimation",
"Visual Systems",
"Optical Imaging"
],
"authors": [
{
"affiliation": "Georgia State University",
"fullName": "Qing Su",
"givenName": "Qing",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia State University",
"fullName": "Shihao Ji",
"givenName": "Shihao",
"surname": "Ji",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1929-1939",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0NZZYZBHa",
"name": "pcvpr202269460-09879564s1-mm_694600b929.zip",
"size": "1.06 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879564s1-mm_694600b929.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600b919",
"articleId": "1H1iFQnoZ6E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600b940",
"articleId": "1H0NElMJ3Da",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2015/0481/0/07394362",
"title": "Autonomous Glaucoma detection from fundus image using cup to disc ratio and hybrid features",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2015/07394362/12OmNBEpnym",
"parentPublication": {
"id": "proceedings/isspit/2015/0481/0",
"title": "2015 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wiamis/2008/3130/0/3130a179",
"title": "Luminance Correction in Stereo Correspondence Based Structure from Motion",
"doi": null,
"abstractUrl": "/proceedings-article/wiamis/2008/3130a179/12OmNqOffB8",
"parentPublication": {
"id": "proceedings/wiamis/2008/3130/0",
"title": "Image Analysis for Multimedia Interactive Services, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2014/5921/0/07041908",
"title": "Development of spectropolarimetric imagers for imaging of desert soils",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2014/07041908/12OmNvpew9I",
"parentPublication": {
"id": "proceedings/aipr/2014/5921/0",
"title": "2014 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/memsys/1997/3744/0/00581858",
"title": "Compact optomechanical switches and their applications in optical communication and testing systems",
"doi": null,
"abstractUrl": "/proceedings-article/memsys/1997/00581858/12OmNwkzuvD",
"parentPublication": {
"id": "proceedings/memsys/1997/3744/0",
"title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvm/1991/2153/0/00212787",
"title": "Estimating 3D vehicle motion in an outdoor scene from monocular and stereo image sequences",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1991/00212787/12OmNy5R3Bi",
"parentPublication": {
"id": "proceedings/wvm/1991/2153/0",
"title": "Proceedings of the IEEE Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/06/i0968",
"title": "Stereo Using Monocular Cues within the Tensor Voting Framework",
"doi": null,
"abstractUrl": "/journal/tp/2006/06/i0968/13rRUxBa5sW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a482",
"title": "A CNN-Transformer Hybrid Network for Joint Optic Cup and Optic Disc Segmentation in Fundus Images",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a482/1FUUoPkclpu",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956197",
"title": "On Depth Error from Spherical Camera Calibration within Omnidirectional Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956197/1IHqnsyNjbO",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956086",
"title": "Vision Transformers Based Classification for Glaucomatous Eye Condition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956086/1IHqux4uSHe",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a759",
"title": "Blood Vessel Segmentation from Retinal Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a759/1pBMuKA6GPK",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgwmXTiz6",
"doi": "10.1109/VR55154.2023.00055",
"title": "Manipulation of Motion Parallax Gain Distorts Perceived Distance and Object Depth in Virtual Reality",
"normalizedTitle": "Manipulation of Motion Parallax Gain Distorts Perceived Distance and Object Depth in Virtual Reality",
"abstract": "Virtual reality (VR) is distinguished by the rich, multimodal, immersive sensory information and affordances provided to the user. However, when moving about an immersive virtual world the visual display often conflicts with other sensory cues due to design, the nature of the simulation, or to system limitations (for example impoverished vestibular motion cues during acceleration in racing games). Given that conflicts between sensory cues have been associated with disorientation or discomfort, and theoretically could distort spatial perception, it is important that we understand how and when they are manifested in the user experience. To this end, this set of experiments investigates the impact of mismatch between physical and virtual motion parallax on the perception of the depth of an apparently perpendicular dihedral angle (a fold) and its distance. We applied gain distortions between visual and kinesthetic head motion during lateral sway movements and measured the effect of gain on depth, distance and lateral space compression. We found that under monocular viewing, observers made smaller object depth and distance settings especially when the gain was greater than 1. Estimates of target distance declined with increasing gain under monocular viewing. Similarly, mean set depth decreased with increasing gain under monocular viewing, except at 6.0 m. The effect of gain was minimal when observers viewed the stimulus binocularly. Further, binocular viewing (stereopsis) improved the precision but not necessarily the accuracy of gain perception. Overall, the lateral compression of space was similar in the stereoscopic and monocular test conditions. Taken together, our results show that the use of large presentation distances (at 6 m) combined with binocular cues to depth and distance enhanced humans' tolerance to visual and kinesthetic mismatch.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality (VR) is distinguished by the rich, multimodal, immersive sensory information and affordances provided to the user. However, when moving about an immersive virtual world the visual display often conflicts with other sensory cues due to design, the nature of the simulation, or to system limitations (for example impoverished vestibular motion cues during acceleration in racing games). Given that conflicts between sensory cues have been associated with disorientation or discomfort, and theoretically could distort spatial perception, it is important that we understand how and when they are manifested in the user experience. To this end, this set of experiments investigates the impact of mismatch between physical and virtual motion parallax on the perception of the depth of an apparently perpendicular dihedral angle (a fold) and its distance. We applied gain distortions between visual and kinesthetic head motion during lateral sway movements and measured the effect of gain on depth, distance and lateral space compression. We found that under monocular viewing, observers made smaller object depth and distance settings especially when the gain was greater than 1. Estimates of target distance declined with increasing gain under monocular viewing. Similarly, mean set depth decreased with increasing gain under monocular viewing, except at 6.0 m. The effect of gain was minimal when observers viewed the stimulus binocularly. Further, binocular viewing (stereopsis) improved the precision but not necessarily the accuracy of gain perception. Overall, the lateral compression of space was similar in the stereoscopic and monocular test conditions. Taken together, our results show that the use of large presentation distances (at 6 m) combined with binocular cues to depth and distance enhanced humans' tolerance to visual and kinesthetic mismatch.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality (VR) is distinguished by the rich, multimodal, immersive sensory information and affordances provided to the user. However, when moving about an immersive virtual world the visual display often conflicts with other sensory cues due to design, the nature of the simulation, or to system limitations (for example impoverished vestibular motion cues during acceleration in racing games). Given that conflicts between sensory cues have been associated with disorientation or discomfort, and theoretically could distort spatial perception, it is important that we understand how and when they are manifested in the user experience. To this end, this set of experiments investigates the impact of mismatch between physical and virtual motion parallax on the perception of the depth of an apparently perpendicular dihedral angle (a fold) and its distance. We applied gain distortions between visual and kinesthetic head motion during lateral sway movements and measured the effect of gain on depth, distance and lateral space compression. We found that under monocular viewing, observers made smaller object depth and distance settings especially when the gain was greater than 1. Estimates of target distance declined with increasing gain under monocular viewing. Similarly, mean set depth decreased with increasing gain under monocular viewing, except at 6.0 m. The effect of gain was minimal when observers viewed the stimulus binocularly. Further, binocular viewing (stereopsis) improved the precision but not necessarily the accuracy of gain perception. Overall, the lateral compression of space was similar in the stereoscopic and monocular test conditions. Taken together, our results show that the use of large presentation distances (at 6 m) combined with binocular cues to depth and distance enhanced humans' tolerance to visual and kinesthetic mismatch.",
"fno": "481500a398",
"keywords": [
"Visualization",
"Solid Modeling",
"Three Dimensional Displays",
"Sensitivity",
"Shape",
"Stereo Image Processing",
"Virtual Reality",
"Depth Perception",
"Egocentric Distance",
"Motion Gain",
"Motion Parallax"
],
"authors": [
{
"affiliation": "York University,Centre for Vision Research,Department of Electrical Engineering & Computer Science",
"fullName": "Xue Teng",
"givenName": "Xue",
"surname": "Teng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University,Centre for Vision Research,Department of Electrical Engineering & Computer Science",
"fullName": "Robert S. Allison",
"givenName": "Robert S.",
"surname": "Allison",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University,Centre for Vision Research,Department of Psychology",
"fullName": "Laurie M. Wilcox",
"givenName": "Laurie M.",
"surname": "Wilcox",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "398-408",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgweFPK0g",
"name": "pvr202348150-010108494s1-mm_481500a398.zip",
"size": "87.1 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108494s1-mm_481500a398.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500a387",
"articleId": "1MNgnsaWhji",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a409",
"articleId": "1MNgA7qw20U",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948413",
"title": "A study of depth perception in hand-held augmented reality using autostereoscopic displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948413/12OmNAoUTpO",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989c091",
"title": "Revisiting Depth Layers from Occlusions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989c091/12OmNx8Ouxn",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492802",
"title": "A study of depth visualization techniques for virtual annotations in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492802/12OmNyo1nO1",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/05/mcg2015050034",
"title": "Reducing Visual Discomfort with HMDs Using Dynamic Depth of Field",
"doi": null,
"abstractUrl": "/magazine/cg/2015/05/mcg2015050034/13rRUEgarvh",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040581",
"title": "The Right View from the Wrong Location: Depth Perception in Stereoscopic Multi-User Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040581/13rRUxASuGh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714042",
"title": "Stereopsis Only: Validation of a Monocular Depth Cues Reduced Gamified Virtual Reality with Reaction Time Measurement",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714042/1B0XZrABGk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2019/3918/0/391800a758",
"title": "The Monocular Stereoscopic Model Based on an Ordered Wave Particle Swarm",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2019/391800a758/1gRxnxuyvDy",
"parentPublication": {
"id": "proceedings/itme/2019/3918/0",
"title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a064",
"title": "The Effects of Object Shape, Fidelity, Color, and Luminance on Depth Perception in Handheld Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a064/1pysxPMqyTm",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a191",
"title": "Unsupervised Learning of Depth Estimation Based on Attention Model from Monocular Images",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a191/1vg7TiOtZQY",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a082",
"title": "Comparing Distance Judgments in Real and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a082/1yfxMk2JFHW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNClQ0o4",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqI04Pu",
"doi": "10.1109/CVPRW.2010.5543463",
"title": "Dynamic projection environments for immersive visualization",
"normalizedTitle": "Dynamic projection environments for immersive visualization",
"abstract": "We present a system for dynamic projection on large, human-scale, moving projection screens and demonstrate this system for immersive visualization applications in several fields. We have designed and implemented efficient, low-cost methods for robust tracking of projection surfaces, and a method to provide high frame rate output for computationally-intensive, low frame rate applications. We present a distributed rendering environment which allows many projectors to work together to illuminate the projection surfaces. This physically immersive visualization environment promotes innovation and creativity in design and analysis applications and facilitates exploration of alternative visualization styles and modes. The system provides for multiple participants to interact in a shared environment in a natural manner. Our new human-scale user interface is intuitive and novice users require essentially no instruction to operate the visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a system for dynamic projection on large, human-scale, moving projection screens and demonstrate this system for immersive visualization applications in several fields. We have designed and implemented efficient, low-cost methods for robust tracking of projection surfaces, and a method to provide high frame rate output for computationally-intensive, low frame rate applications. We present a distributed rendering environment which allows many projectors to work together to illuminate the projection surfaces. This physically immersive visualization environment promotes innovation and creativity in design and analysis applications and facilitates exploration of alternative visualization styles and modes. The system provides for multiple participants to interact in a shared environment in a natural manner. Our new human-scale user interface is intuitive and novice users require essentially no instruction to operate the visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a system for dynamic projection on large, human-scale, moving projection screens and demonstrate this system for immersive visualization applications in several fields. We have designed and implemented efficient, low-cost methods for robust tracking of projection surfaces, and a method to provide high frame rate output for computationally-intensive, low frame rate applications. We present a distributed rendering environment which allows many projectors to work together to illuminate the projection surfaces. This physically immersive visualization environment promotes innovation and creativity in design and analysis applications and facilitates exploration of alternative visualization styles and modes. The system provides for multiple participants to interact in a shared environment in a natural manner. Our new human-scale user interface is intuitive and novice users require essentially no instruction to operate the visualization.",
"fno": "05543463",
"keywords": [
"Data Visualisation",
"Rendering Computer Graphics",
"User Interfaces",
"Dynamic Projection Environments",
"Immersive Visualization",
"Distributed Rendering Environment",
"Projection Surface Illumination",
"Multiple Participant Interaction",
"Human Scale User Interface",
"Data Visualization",
"Geometry",
"Rendering Computer Graphics",
"Surface Reconstruction",
"Cameras",
"Surface Texture",
"User Interfaces",
"Displays",
"Real Time Systems",
"Calibration"
],
"authors": [
{
"affiliation": "Department of Computer Science, Rensselaer Polytechnic Institute, USA",
"fullName": "Theodore C. Yapo",
"givenName": "Theodore C.",
"surname": "Yapo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Rensselaer Polytechnic Institute, USA",
"fullName": "Yu Sheng",
"givenName": "Yu",
"surname": "Sheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Rensselaer Polytechnic Institute, USA",
"fullName": "Joshua Nasman",
"givenName": "Joshua",
"surname": "Nasman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Rensselaer Polytechnic Institute, USA",
"fullName": "Andrew Dolce",
"givenName": "Andrew",
"surname": "Dolce",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Rensselaer Polytechnic Institute, USA",
"fullName": "Eric Li",
"givenName": "Eric",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Rensselaer Polytechnic Institute, USA",
"fullName": "Barbara Cutler",
"givenName": "Barbara",
"surname": "Cutler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2010",
"issn": "2160-7508",
"isbn": "978-1-4244-7029-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05543222",
"articleId": "12OmNyoiZav",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05543466",
"articleId": "12OmNzdoMtZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2017/2937/0/2937a114",
"title": "Detecting Good Surface for Improvisatory Visual Projection",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a114/12OmNCd2roE",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810998",
"title": "Image Blending and View Clustering for Multi-Viewer Immersive Projection Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810998/12OmNCfSqFi",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2005/2459/0/24590014",
"title": "Enabling View-Dependent Stereoscopic Projection in Real Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2005/24590014/12OmNqIQS25",
"parentPublication": {
"id": "proceedings/ismar/2005/2459/0",
"title": "Fourth IEEE and ACM International Symposium on Mixed and Augmented Reality (ISMAR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbg/2005/20/0/01500311",
"title": "Attraction and projection",
"doi": null,
"abstractUrl": "/proceedings-article/pbg/2005/01500311/12OmNvlg8io",
"parentPublication": {
"id": "proceedings/pbg/2005/20/0",
"title": "Point-Based Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892246",
"title": "Enhancements to VTK enabling scientific visualization in immersive environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892246/12OmNvmG7ZP",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920224",
"title": "Visualization for Genome Function Analysis Using Immersive Projection Technology",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920224/12OmNwe2Ims",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480780",
"title": "Inexpensive Immersive Projection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480780/12OmNzQR1rP",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2013/5049/0/5049a389",
"title": "The Effect of Stereoscopic Immersive Environments on Projection-Based Multi-dimensional Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2013/5049a389/12OmNzcPAqw",
"parentPublication": {
"id": "proceedings/iv/2013/5049/0",
"title": "2013 17th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2005/2372/2/01467579",
"title": "A projector-camera system with real-time photometric adaptation for dynamic environments",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2005/01467579/1htC67moXAs",
"parentPublication": {
"id": "proceedings/cvpr/2005/2372/2",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a174",
"title": "Real-Time Adaptive Color Correction in Dynamic Projection Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a174/1pysyl9FDhu",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAY79o0",
"title": "2009 International Conference on Artificial Intelligence and Computational Intelligence",
"acronym": "aici",
"groupId": "1003069",
"volume": "4",
"displayVolume": "4",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy6ZrZ3",
"doi": "10.1109/AICI.2009.202",
"title": "Improved on Maximum Intensity Projection",
"normalizedTitle": "Improved on Maximum Intensity Projection",
"abstract": "Maximum Intensity Projection (MIP) is one of the most common methods for the visualization of volumetric data. MIP works by traversing all viewing rays and finding the maximum data value along each of them. The main limitation of MIP is that it cannot adequately depict the spatial relationships of overlapping tissues. An approach has been proposed to eliminate this drawback: Local Maximum Intensity Projection(LMIP). However, with too low a threshold value, the first encountered local maxima are mostly noise components; with a threshold value larger than the maximum intensity among all the voxels in the 3D data, LMIP is equivalent to MIP. So the results rely on the threshold. If the threshold is low, we will not get a good result. There is no shading information in MIP and LMIP. In this paper we propose an improved local maximum intensity projection. An appropriate threshold and shading are computed in this improved method. We show that the improved method is a useful technology in volumetric dataset visualization",
"abstracts": [
{
"abstractType": "Regular",
"content": "Maximum Intensity Projection (MIP) is one of the most common methods for the visualization of volumetric data. MIP works by traversing all viewing rays and finding the maximum data value along each of them. The main limitation of MIP is that it cannot adequately depict the spatial relationships of overlapping tissues. An approach has been proposed to eliminate this drawback: Local Maximum Intensity Projection(LMIP). However, with too low a threshold value, the first encountered local maxima are mostly noise components; with a threshold value larger than the maximum intensity among all the voxels in the 3D data, LMIP is equivalent to MIP. So the results rely on the threshold. If the threshold is low, we will not get a good result. There is no shading information in MIP and LMIP. In this paper we propose an improved local maximum intensity projection. An appropriate threshold and shading are computed in this improved method. We show that the improved method is a useful technology in volumetric dataset visualization",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Maximum Intensity Projection (MIP) is one of the most common methods for the visualization of volumetric data. MIP works by traversing all viewing rays and finding the maximum data value along each of them. The main limitation of MIP is that it cannot adequately depict the spatial relationships of overlapping tissues. An approach has been proposed to eliminate this drawback: Local Maximum Intensity Projection(LMIP). However, with too low a threshold value, the first encountered local maxima are mostly noise components; with a threshold value larger than the maximum intensity among all the voxels in the 3D data, LMIP is equivalent to MIP. So the results rely on the threshold. If the threshold is low, we will not get a good result. There is no shading information in MIP and LMIP. In this paper we propose an improved local maximum intensity projection. An appropriate threshold and shading are computed in this improved method. We show that the improved method is a useful technology in volumetric dataset visualization",
"fno": "3816d491",
"keywords": [
"Direct Volume Rendering",
"Maximum Intensity Projection",
"Local Maximum Intensity Projection",
"Threshold"
],
"authors": [
{
"affiliation": null,
"fullName": "Feng Ling",
"givenName": "Feng",
"surname": "Ling",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ling Yang",
"givenName": "Ling",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aici",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-11-01T00:00:00",
"pubType": "proceedings",
"pages": "491-495",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3816-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3816d482",
"articleId": "12OmNznCkZD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3816d496",
"articleId": "12OmNqBtiHj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2001/7200/0/7200kraus",
"title": "Cell-Projection of Cyclic Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2001/7200kraus/12OmNA2cYzt",
"parentPublication": {
"id": "proceedings/ieee-vis/2001/7200/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iita/2008/3497/1/3497a486",
"title": "Edge Detection Method for Non-Diffraction Beam Image Based on Local Maximum Intensity",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2008/3497a486/12OmNqNosdR",
"parentPublication": {
"id": "proceedings/iita/2008/3497/3",
"title": "2008 Second International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccet/2009/3521/1/3521a419",
"title": "A Novel Approach for Face Recognition Based on Supervised Locality Preserving Projection and Maximum Margin Criterion",
"doi": null,
"abstractUrl": "/proceedings-article/iccet/2009/3521a419/12OmNrJiCYc",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbeb/2012/4706/0/4706a736",
"title": "Application of AIP and MIP CT on Individual GTV Delineation for Tumor Moving with Respiration",
"doi": null,
"abstractUrl": "/proceedings-article/icbeb/2012/4706a736/12OmNxWLTyH",
"parentPublication": {
"id": "proceedings/icbeb/2012/4706/0",
"title": "Biomedical Engineering and Biotechnology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620443",
"title": "An interactive cerebral blood vessel exploration system",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620443/12OmNxeut7s",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/6/3507f371",
"title": "Intensity-Distance Projection Space Based Human Tracking in Far-Infrared Image Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507f371/12OmNyFCvY8",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2011/4596/0/4596a385",
"title": "Maximum Margin/Volume Outlier Detection",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2011/4596a385/12OmNzZmZuI",
"parentPublication": {
"id": "proceedings/ictai/2011/4596/0",
"title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870011",
"title": "Interactive Maximum Projection Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870011/12OmNzZmZv2",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/03/v0238",
"title": "Interactive Ray Tracing for Volume Visualization",
"doi": null,
"abstractUrl": "/journal/tg/1999/03/v0238/13rRUxOdD85",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/2/08005979",
"title": "Interactive Visualization of the Maximum Intensity Projection Method",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005979/17D45W2Wyzd",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/2",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysuGClQ9a",
"doi": "10.1109/ISMAR50242.2020.00038",
"title": "ElaMorph Projection: Deformation of 3D Shape by Dynamic Projection Mapping",
"normalizedTitle": "ElaMorph Projection: Deformation of 3D Shape by Dynamic Projection Mapping",
"abstract": "We propose a projector-based method that provides an illusion of geometry change, similar to that caused by a change in physical properties such as elasticity, in response to inertia caused by physical motion. The proposed method is named “ElaMorph projection.” Although several projection mapping methods capable of deforming targets have been previously proposed, these methods require the preparation of animations in advance. Moreover, these methods are unable to deform the shape in real-time according to actual movements of the object. To address these issues, we perform real-time geometry deformation and rendering based on the spatial motion of the object. To render the projection image, we extend the conventional method of deformation for 2D pictures or static 3D objects to include dynamic 3D objects. This study involves projection onto a dynamic 3D object; however, the projection quality decreases if a part of the rendered image extends beyond the projection target. To address this issue, the proposed algorithm ensures that the vertices after deformation always remain within the projection target. In addition, we develop a robust algorithm to generate projection images under dynamic illuminative conditions, through real-time estimation of the environmental lighting required for rendering. Moreover, using an elasticity map that can be easily constructed using a UV map, our method enables users to specify the vertices to be deformed, using an elasticity map. We present projections under several different sets of elasticity maps, environmental lighting, and elasticities. Finally, we evaluate the latency and throughput of our system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a projector-based method that provides an illusion of geometry change, similar to that caused by a change in physical properties such as elasticity, in response to inertia caused by physical motion. The proposed method is named “ElaMorph projection.” Although several projection mapping methods capable of deforming targets have been previously proposed, these methods require the preparation of animations in advance. Moreover, these methods are unable to deform the shape in real-time according to actual movements of the object. To address these issues, we perform real-time geometry deformation and rendering based on the spatial motion of the object. To render the projection image, we extend the conventional method of deformation for 2D pictures or static 3D objects to include dynamic 3D objects. This study involves projection onto a dynamic 3D object; however, the projection quality decreases if a part of the rendered image extends beyond the projection target. To address this issue, the proposed algorithm ensures that the vertices after deformation always remain within the projection target. In addition, we develop a robust algorithm to generate projection images under dynamic illuminative conditions, through real-time estimation of the environmental lighting required for rendering. Moreover, using an elasticity map that can be easily constructed using a UV map, our method enables users to specify the vertices to be deformed, using an elasticity map. We present projections under several different sets of elasticity maps, environmental lighting, and elasticities. Finally, we evaluate the latency and throughput of our system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a projector-based method that provides an illusion of geometry change, similar to that caused by a change in physical properties such as elasticity, in response to inertia caused by physical motion. The proposed method is named “ElaMorph projection.” Although several projection mapping methods capable of deforming targets have been previously proposed, these methods require the preparation of animations in advance. Moreover, these methods are unable to deform the shape in real-time according to actual movements of the object. To address these issues, we perform real-time geometry deformation and rendering based on the spatial motion of the object. To render the projection image, we extend the conventional method of deformation for 2D pictures or static 3D objects to include dynamic 3D objects. This study involves projection onto a dynamic 3D object; however, the projection quality decreases if a part of the rendered image extends beyond the projection target. To address this issue, the proposed algorithm ensures that the vertices after deformation always remain within the projection target. In addition, we develop a robust algorithm to generate projection images under dynamic illuminative conditions, through real-time estimation of the environmental lighting required for rendering. Moreover, using an elasticity map that can be easily constructed using a UV map, our method enables users to specify the vertices to be deformed, using an elasticity map. We present projections under several different sets of elasticity maps, environmental lighting, and elasticities. Finally, we evaluate the latency and throughput of our system.",
"fno": "850800a164",
"keywords": [
"Computer Animation",
"Geometry",
"Rendering Computer Graphics",
"Ela Morph Projection",
"Dynamic Projection",
"Projector Based Method",
"Geometry Change",
"Physical Properties",
"Physical Motion",
"Real Time Geometry Deformation",
"Rendering",
"Spatial Motion",
"Projection Image",
"Static 3 D Objects",
"Dynamic 3 D Objects",
"Projection Quality",
"Rendered Image",
"Projection Target",
"Dynamic Illuminative Conditions",
"Real Time Estimation",
"Elasticity Map",
"UV Map",
"3 D Shape Deformation",
"Dynamic Projection Mapping",
"Animation",
"Three Dimensional Displays",
"Shape",
"Heuristic Algorithms",
"Lighting",
"Elasticity",
"Real Time Systems",
"Strain",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Mixed Augmented Reality",
"Computing Methodologies",
"Computer Graphics",
"Animation"
],
"authors": [
{
"affiliation": "The University of Tokyo",
"fullName": "Kentaro Fukamizu",
"givenName": "Kentaro",
"surname": "Fukamizu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Leo Miyashita",
"givenName": "Leo",
"surname": "Miyashita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Masatoshi Ishikawa",
"givenName": "Masatoshi",
"surname": "Ishikawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "164-173",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a156",
"articleId": "1pysxl4CVuE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a174",
"articleId": "1pysyl9FDhu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ifita/2009/3600/2/3600b217",
"title": "A Method of Analyzing the Strain of Arterial Wall",
"doi": null,
"abstractUrl": "/proceedings-article/ifita/2009/3600b217/12OmNrFkePD",
"parentPublication": {
"id": "proceedings/ifita/2009/3600/2",
"title": "2009 International Forum on Information Technology and Applications (IFITA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601a858",
"title": "DeformNet: Free-Form Deformation Network for 3D Shape Reconstruction from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a858/12OmNyKJiqm",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a091",
"title": "Symmetric Shape Morphing for 3D Face and Head Modelling",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a091/12OmNypIYEY",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a273",
"title": "Investigation of Project Moir: Interferometry Technique on Measuring Model Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a273/12OmNz5apCt",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1999/0164/1/01640405",
"title": "Shape from Texture through Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1999/01640405/12OmNzcPApB",
"parentPublication": {
"id": "proceedings/iccv/1999/0164/1",
"title": "Proceedings of the Seventh IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101633",
"title": "Boundary-Aware Multidomain Subspace Deformation",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101633/13rRUxBJhFv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/08536417",
"title": "NNWarp: Neural Network-Based Nonlinear Deformation",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/08536417/17D45XERmmD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797923",
"title": "Material Surface Reproduction and Perceptual Deformation with Projection Mapping for Car Interior Design",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797923/1cJ0SEdW2Lm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/03/08839414",
"title": "Sparse Data Driven Mesh Deformation",
"doi": null,
"abstractUrl": "/journal/tg/2021/03/08839414/1dqsrINsJsk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1708",
"title": "Joint Learning of 3D Shape Retrieval and Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1708/1yeHS9oJtPW",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeD6SIJJrG",
"doi": "10.1109/ISMAR52148.2021.00069",
"title": "Scan&Paint: Image-based Projection Painting",
"normalizedTitle": "Scan&Paint: Image-based Projection Painting",
"abstract": "We present a pop-up projection painting system that projects onto an unknown three-dimensional surface, while the user creates the projection content on the fly. The digital paint is projected immediately and follows the object if it is moved. If unexplored surface areas are thereby exposed, an automated trigger system issues new depth recordings that expand and refine the surface estimate. By intertwining scanning and projection painting we scan the exposed surface at the appropriate time and only if needed. Like image-based rendering, multiple automatically recorded depth maps are fused in screen space to synthesize novel views of the object, making projection poses independent from the scan positions. Since the user’s digital paint is also stored in images, we eliminate the need to reconstruct and parametrize a single full mesh, which makes geometry and color updates simple and fast.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a pop-up projection painting system that projects onto an unknown three-dimensional surface, while the user creates the projection content on the fly. The digital paint is projected immediately and follows the object if it is moved. If unexplored surface areas are thereby exposed, an automated trigger system issues new depth recordings that expand and refine the surface estimate. By intertwining scanning and projection painting we scan the exposed surface at the appropriate time and only if needed. Like image-based rendering, multiple automatically recorded depth maps are fused in screen space to synthesize novel views of the object, making projection poses independent from the scan positions. Since the user’s digital paint is also stored in images, we eliminate the need to reconstruct and parametrize a single full mesh, which makes geometry and color updates simple and fast.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a pop-up projection painting system that projects onto an unknown three-dimensional surface, while the user creates the projection content on the fly. The digital paint is projected immediately and follows the object if it is moved. If unexplored surface areas are thereby exposed, an automated trigger system issues new depth recordings that expand and refine the surface estimate. By intertwining scanning and projection painting we scan the exposed surface at the appropriate time and only if needed. Like image-based rendering, multiple automatically recorded depth maps are fused in screen space to synthesize novel views of the object, making projection poses independent from the scan positions. Since the user’s digital paint is also stored in images, we eliminate the need to reconstruct and parametrize a single full mesh, which makes geometry and color updates simple and fast.",
"fno": "015800a517",
"keywords": [
"Image Colour Analysis",
"Image Fusion",
"Rendering Computer Graphics",
"Solid Modelling",
"Digital Paint",
"Surface Estimate",
"Scan Positions",
"Image Based Projection Painting",
"Three Dimensional Surface",
"Scan Amp Paint",
"Depth Maps Fusion",
"Image Based Rendering",
"Geometry",
"Surface Reconstruction",
"Three Dimensional Displays",
"Image Color Analysis",
"Rendering Computer Graphics",
"Paints",
"Image Reconstruction",
"Computing Methodologies",
"Mixed Augmented Reality",
"Image Based Rendering",
"Reconstruction"
],
"authors": [
{
"affiliation": "Friedrich-Alexander University Erlangen-Nūrnberg (FAU), Visual Computing",
"fullName": "Vanessa Klein",
"givenName": "Vanessa",
"surname": "Klein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nūrnberg (FAU), Visual Computing",
"fullName": "Markus Leuschner",
"givenName": "Markus",
"surname": "Leuschner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nūrnberg (FAU), Visual Computing",
"fullName": "Tobias Langen",
"givenName": "Tobias",
"surname": "Langen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nūrnberg (FAU), Visual Computing",
"fullName": "Philipp Kurth",
"givenName": "Philipp",
"surname": "Kurth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nūrnberg (FAU), Visual Computing",
"fullName": "Marc Stamminger",
"givenName": "Marc",
"surname": "Stamminger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Friedrich-Alexander University Erlangen-Nūrnberg (FAU), Visual Computing",
"fullName": "Frank Bauer",
"givenName": "Frank",
"surname": "Bauer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "517-525",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeD6gi4NC8",
"name": "pismar202101580-09583790s1-mm_015800a517.zip",
"size": "44.5 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202101580-09583790s1-mm_015800a517.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "015800a508",
"articleId": "1yeD4rzUalO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a527",
"articleId": "1yeD23fVvnG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wsc/2003/8131/2/01261551",
"title": "Paint line color change reduction in automobile assembly through simulation",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2003/01261551/12OmNALlcgB",
"parentPublication": {
"id": "proceedings/wsc/2003/8131/2",
"title": "Proceedings of the 2003 Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161057",
"title": "Visualizing Spray Paint Deposition in VR Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161057/12OmNAWH9CM",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2003/2028/0/20280141",
"title": "A Painting Interface for Interactive Surface Deformations",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2003/20280141/12OmNsbGvEM",
"parentPublication": {
"id": "proceedings/pg/2003/2028/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00323997",
"title": "ExTwAnPaSy: an Extensible Two-dimensional Animation/Paint System",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00323997/12OmNy314fS",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a017",
"title": "LifeBrush: Painting Interactive Agent-Based Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a017/17D45VTRonL",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g578",
"title": "Paint Transformer: Feed Forward Neural Painting with Stroke Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g578/1BmHtL6lzMY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150986",
"title": "Calibrated Vehicle Paint Signatures for Simulating Hyperspectral Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150986/1lPHxnukvyE",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i432",
"title": "Painting Many Pasts: Synthesizing Time Lapse Videos of Paintings",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i432/1m3nu7jSK6Q",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCuDzsp",
"title": "2011 IEEE Asia -Pacific Services Computing Conference",
"acronym": "apscc",
"groupId": "1001486",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCcKQI5",
"doi": "10.1109/APSCC.2011.68",
"title": "Interoperable Telepresence Services: Beyond HD-Videoconferences and Towards Telepresence",
"normalizedTitle": "Interoperable Telepresence Services: Beyond HD-Videoconferences and Towards Telepresence",
"abstract": "Telepresence service provides immersive and realistic presence experience to users. Even though telepresence service is no longer new terminology any more, it has not been widely deployed to users. There are several vendors that provide telepresence devices, but they do not interoperate each other because they implemented advanced feature with their own way. Hence, two major SDOs are under standardization. Besides of standardization, for the diffusion of telepresence service, there should be some strategy such as interoperation with legacy terminals. In this paper, we briefly summarize characteristics of telepresence and standardization activities and introduce a diffusion strategy model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Telepresence service provides immersive and realistic presence experience to users. Even though telepresence service is no longer new terminology any more, it has not been widely deployed to users. There are several vendors that provide telepresence devices, but they do not interoperate each other because they implemented advanced feature with their own way. Hence, two major SDOs are under standardization. Besides of standardization, for the diffusion of telepresence service, there should be some strategy such as interoperation with legacy terminals. In this paper, we briefly summarize characteristics of telepresence and standardization activities and introduce a diffusion strategy model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Telepresence service provides immersive and realistic presence experience to users. Even though telepresence service is no longer new terminology any more, it has not been widely deployed to users. There are several vendors that provide telepresence devices, but they do not interoperate each other because they implemented advanced feature with their own way. Hence, two major SDOs are under standardization. Besides of standardization, for the diffusion of telepresence service, there should be some strategy such as interoperation with legacy terminals. In this paper, we briefly summarize characteristics of telepresence and standardization activities and introduce a diffusion strategy model.",
"fno": "4624a327",
"keywords": [
"Telepresence",
"Videoconference",
"Standardization"
],
"authors": [
{
"affiliation": null,
"fullName": "Wook Hyun",
"givenName": "Wook",
"surname": "Hyun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shin Gak Kang",
"givenName": "Shin Gak",
"surname": "Kang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "apscc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-12-01T00:00:00",
"pubType": "proceedings",
"pages": "327-329",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4624-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4624a321",
"articleId": "12OmNwkzutK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4624a330",
"articleId": "12OmNBTawvR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isads/1995/7087/0/70870323",
"title": "A network services interface for telepresence applications",
"doi": null,
"abstractUrl": "/proceedings-article/isads/1995/70870323/12OmNvjgWnK",
"parentPublication": {
"id": "proceedings/isads/1995/7087/0",
"title": "Proceedings ISADS 95. Second International Symposium on Autonomous Decentralized Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ettandgrs/2008/3563/1/3563a768",
"title": "The Force Reflecting Telepresence Master-Slave Control System Based on Stochastic Resonance",
"doi": null,
"abstractUrl": "/proceedings-article/ettandgrs/2008/3563a768/12OmNxWcHcr",
"parentPublication": {
"id": "proceedings/ettandgrs/2008/3563/1",
"title": "Education Technology and Training & Geoscience and Remote Sensing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ettandgrs/2008/3563/1/3563a778",
"title": "Analysis of Small Time Delay Stability in Force Telepresence System",
"doi": null,
"abstractUrl": "/proceedings-article/ettandgrs/2008/3563a778/12OmNzkuKI4",
"parentPublication": {
"id": "proceedings/ettandgrs/2008/3563/1",
"title": "Education Technology and Training & Geoscience and Remote Sensing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446471",
"title": "Towards Mobile 3D Telepresence Using Head-Worn Devices and Dual-Purpose Screens",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446471/13bd1AITn9Y",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446521",
"title": "Extended Abstract: Natural Human-Robot Interaction in Virtual Reality Telepresence Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446521/13bd1ftOBCZ",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a624",
"title": "AS-EKF: A Delay Aware State Estimation Technique for Telepresence Robot Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a624/18M7h2cgDDO",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a524",
"title": "Synthesizing Novel Spaces for Remote Telepresence Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a524/1J7WaFB7xNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/04/09537616",
"title": "TIUI: Touching Live Video for Telepresence Operation",
"doi": null,
"abstractUrl": "/journal/tm/2023/04/09537616/1wTinsFrkju",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a346",
"title": "Tactile Telepresence for Isolated Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a346/1yeQGRM0HLi",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yfxI1Gbi4o",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00020",
"title": "Assessing Telepresence, Social Presence and Stress Response in a Virtual Reality Store",
"normalizedTitle": "Assessing Telepresence, Social Presence and Stress Response in a Virtual Reality Store",
"abstract": "The development of immersive Virtual Reality (VR) has provided users around the globe with a highly realistic virtual world experience. Since its first use, extensive research has been conducted with the attempt to understand how human behaviour in virtual environments compares to the real world. Studies have shown that people exhibit similar behaviours and reactions in a number of scenarios including virtual shopping, thus making it a promising tool for researchers to study in-store shopper behaviour. This paper outlines ideas on how store atmospherics can affect the user experience, stress levels and behaviour in a virtual store environment, developed using the Unreal Game Engine. The presence/absence of avatars can be investigated as an important aspect of store atmospherics and a potential antecedent of perceived presence in a simulated retail environment. These insights will be useful for retailers in that they can guide development and improvement of virtual simulated shopping experiences, using elements of telepresence and social presence, to enhance the consumer shopping experience and their own retail strategy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The development of immersive Virtual Reality (VR) has provided users around the globe with a highly realistic virtual world experience. Since its first use, extensive research has been conducted with the attempt to understand how human behaviour in virtual environments compares to the real world. Studies have shown that people exhibit similar behaviours and reactions in a number of scenarios including virtual shopping, thus making it a promising tool for researchers to study in-store shopper behaviour. This paper outlines ideas on how store atmospherics can affect the user experience, stress levels and behaviour in a virtual store environment, developed using the Unreal Game Engine. The presence/absence of avatars can be investigated as an important aspect of store atmospherics and a potential antecedent of perceived presence in a simulated retail environment. These insights will be useful for retailers in that they can guide development and improvement of virtual simulated shopping experiences, using elements of telepresence and social presence, to enhance the consumer shopping experience and their own retail strategy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The development of immersive Virtual Reality (VR) has provided users around the globe with a highly realistic virtual world experience. Since its first use, extensive research has been conducted with the attempt to understand how human behaviour in virtual environments compares to the real world. Studies have shown that people exhibit similar behaviours and reactions in a number of scenarios including virtual shopping, thus making it a promising tool for researchers to study in-store shopper behaviour. This paper outlines ideas on how store atmospherics can affect the user experience, stress levels and behaviour in a virtual store environment, developed using the Unreal Game Engine. The presence/absence of avatars can be investigated as an important aspect of store atmospherics and a potential antecedent of perceived presence in a simulated retail environment. These insights will be useful for retailers in that they can guide development and improvement of virtual simulated shopping experiences, using elements of telepresence and social presence, to enhance the consumer shopping experience and their own retail strategy.",
"fno": "129800a052",
"keywords": [
"Avatars",
"Consumer Behaviour",
"Electronic Commerce",
"Retail Data Processing",
"Retailing",
"Virtual Reality",
"Perceived Presence",
"Simulated Retail Environment",
"Virtual Simulated Shopping Experiences",
"Telepresence",
"Social Presence",
"Consumer Shopping Experience",
"Stress Response",
"Virtual Reality Store",
"Immersive Virtual Reality",
"Highly Realistic Virtual World Experience",
"Human Behaviour",
"Virtual Environments",
"Similar Behaviours",
"Virtual Shopping",
"In Store Shopper Behaviour",
"Store Atmospherics",
"User Experience",
"Stress Levels",
"Virtual Store Environment",
"Unreal Game Engine",
"Telepresence",
"Atmospheric Modeling",
"Avatars",
"Virtual Environments",
"Games",
"Tools",
"User Experience",
"Virtual Store",
"Virtual Reality",
"Stress Response",
"Store Atmospherics",
"Telepresence",
"Social Presence"
],
"authors": [
{
"affiliation": "Massey University,Games and Extended Reality Lab, School of Natural and Computational Sciences,Auckland,New Zealand",
"fullName": "Yinshu Zhao",
"givenName": "Yinshu",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Massey University,Games and Extended Reality Lab, School of Natural and Computational Sciences,Auckland,New Zealand",
"fullName": "Nilufar Baghaei",
"givenName": "Nilufar",
"surname": "Baghaei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Massey University,Games and Extended Reality Lab, School of Natural and Computational Sciences,Auckland,New Zealand",
"fullName": "Alexander Schnack",
"givenName": "Alexander",
"surname": "Schnack",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Massey University,Games and Extended Reality Lab, School of Natural and Computational Sciences,Auckland,New Zealand",
"fullName": "Lehan Stemmet",
"givenName": "Lehan",
"surname": "Stemmet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "52-56",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129800a046",
"articleId": "1yeQMDuQA0g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a057",
"articleId": "1yeQDe05VlK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wsc/2003/8131/2/01261630",
"title": "Agent-based modeling and simulation of store performance for personalized pricing",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2003/01261630/12OmNqzu6VM",
"parentPublication": {
"id": "proceedings/wsc/2003/8131/2",
"title": "Proceedings of the 2003 Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icws/2009/3709/0/3709a727",
"title": "Mobile In-store Personalized Services",
"doi": null,
"abstractUrl": "/proceedings-article/icws/2009/3709a727/12OmNvDI3Qu",
"parentPublication": {
"id": "proceedings/icws/2009/3709/0",
"title": "2009 IEEE International Conference on Web Services",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2016/8779/0/07456526",
"title": "IRIS: Tapping wearable sensing to capture in-store retail insights on shoppers",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2016/07456526/12OmNxxvAHn",
"parentPublication": {
"id": "proceedings/percom/2016/8779/0",
"title": "2016 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pesos/2012/1755/0/06225942",
"title": "Constraint-based invocation of stateful web services: The beep store (case study)",
"doi": null,
"abstractUrl": "/proceedings-article/pesos/2012/06225942/12OmNyqiaQ8",
"parentPublication": {
"id": "proceedings/pesos/2012/1755/0",
"title": "2012 4th International Workshop on Principles of Engineering Service-Oriented Systems (PESOS 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466636",
"title": "Superman vs Giant: A Study on Spatial Perception for a Multi-Scale Mixed Reality Flying Telepresence Interface",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466636/14M3DZXcLXa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a204",
"title": "Social Interaction in Virtual Shopping",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a204/1A3j9ceXwC4",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714051",
"title": "Augmenting Immersive Telepresence Experience with a Virtual Body",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714051/1B0Y0I5xWyk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a654",
"title": "Bring Store in My Room: AR Store Authoring System for Spatial Experience in Mobile Shopping",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a654/1J7WqitKsAU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2022/5305/0/10089288",
"title": "Comparing Customer Behaviours: Immersive Virtual Reality Store Experiences versus Web and Physical Store Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2022/10089288/1M7LemRp5cY",
"parentPublication": {
"id": "proceedings/csde/2022/5305/0",
"title": "2022 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwDACiX",
"title": "IEEE Virtual Reality 2008",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCb3frD",
"doi": "10.1109/VR.2008.4480795",
"title": "Immersive 3D Environment for Remote Collaboration and Training of Physical Activities",
"normalizedTitle": "Immersive 3D Environment for Remote Collaboration and Training of Physical Activities",
"abstract": "In this paper we present a framework for immersive virtual environment intended for remote collaboration and training of physical activities. Our multi-camera system performs full-body 3D reconstruction of human user(s) in real time and renders their image in the virtual space allowing remote users to interact. The paper features a short overview of the technology used for the capturing and reconstruction. Some of the applications where we have successfully demonstrated use of the system in combination with the tele-immersive virtual environment are described. Finally, we address current drawbacks with regard to data capturing and networking and provide some ideas for future work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present a framework for immersive virtual environment intended for remote collaboration and training of physical activities. Our multi-camera system performs full-body 3D reconstruction of human user(s) in real time and renders their image in the virtual space allowing remote users to interact. The paper features a short overview of the technology used for the capturing and reconstruction. Some of the applications where we have successfully demonstrated use of the system in combination with the tele-immersive virtual environment are described. Finally, we address current drawbacks with regard to data capturing and networking and provide some ideas for future work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present a framework for immersive virtual environment intended for remote collaboration and training of physical activities. Our multi-camera system performs full-body 3D reconstruction of human user(s) in real time and renders their image in the virtual space allowing remote users to interact. The paper features a short overview of the technology used for the capturing and reconstruction. Some of the applications where we have successfully demonstrated use of the system in combination with the tele-immersive virtual environment are described. Finally, we address current drawbacks with regard to data capturing and networking and provide some ideas for future work.",
"fno": "04480795",
"keywords": [
"Image Reconstruction",
"Stereo Image Processing",
"Virtual Reality",
"Immersive 3 D Virtual Environment",
"Remote Collaboration",
"Physical Activity Training",
"Multi Camera System",
"Full Body 3 D Reconstruction",
"Collaboration",
"Image Reconstruction",
"Cameras",
"Humans",
"Real Time Systems",
"Rendering Computer Graphics",
"Space Technology",
"Avatars",
"Virtual Environment",
"Computer Vision",
"3 D Reconstruction",
"Immersion",
"Real Time Systems",
"Remote Collaboration",
"Tele Immersion",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial",
"Augmented",
"And Virtual Realities",
"I 4 10 Image Processing And Computer Vision Image Representation Volumetric"
],
"authors": [
{
"affiliation": "Univ. of California, Berkeley, e-mail: gregorij@eecs.berkeley.edu",
"fullName": "Gregorij Kurillo",
"givenName": "Gregorij",
"surname": "Kurillo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of California, Berkeley, e-mail: bajcsy@eecs.berkeley.edu",
"fullName": "Ruzena Bajcsy",
"givenName": "Ruzena",
"surname": "Bajcsy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Illinois, Urbana Champagne, e-mail: klara@cs.uiuc.edu",
"fullName": "Klara Nahrsted",
"givenName": "Klara",
"surname": "Nahrsted",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of California, Davis, e-mail: kreylos@cs.ucdavis.edu",
"fullName": "Oliver Kreylos",
"givenName": "Oliver",
"surname": "Kreylos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-03-01T00:00:00",
"pubType": "proceedings",
"pages": "269-270",
"year": "2008",
"issn": "1087-8270",
"isbn": "978-1-4244-1971-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04480794",
"articleId": "12OmNrAMERg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04480796",
"articleId": "12OmNzwHvqB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvs/2006/2506/0/25060005",
"title": "Learning Physical Activities in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icvs/2006/25060005/12OmNAsk4DY",
"parentPublication": {
"id": "proceedings/icvs/2006/2506/0",
"title": "Fourth IEEE International Conference on Computer Vision Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836456",
"title": "Streaming and Exploration of Dynamically Changing Dense 3D Reconstructions in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836456/12OmNAtK4kY",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543560",
"title": "Teleimmersive 3D collaborative environment for cyberarchaeology",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543560/12OmNCyTyp7",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798870",
"title": "Poster: Immersive point cloud virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798870/12OmNqFJhV7",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pov/2011/035/0/05712368",
"title": "Augmented reality for immersive remote collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/pov/2011/05712368/12OmNqJHFHw",
"parentPublication": {
"id": "proceedings/pov/2011/035/0",
"title": "2011 Workshop on Person-Oriented Vision (POV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718439",
"title": "Examining Work Performance in Immersive Virtual Environments versus Face-to-Face Physical Environments through Laboratory Experimentation",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718439/12OmNwIHotD",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdpsw/2016/3682/0/3682b048",
"title": "Immersive Molecular Visualization with Omnidirectional Stereoscopic Ray Tracing and Remote Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2016/3682b048/12OmNzA6GQL",
"parentPublication": {
"id": "proceedings/ipdpsw/2016/3682/0",
"title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a209",
"title": "Using HoloLens for Remote Collaboration in Extended Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a209/1KaFRbmUUFO",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09257094",
"title": "Output-Sensitive Avatar Representations for Immersive Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09257094/1oFCABrJUmA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a532",
"title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKyu",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwcUjSs",
"doi": "10.1109/ISMAR.2010.5643591",
"title": "An immersive e-learning system providing virtual experience",
"normalizedTitle": "An immersive e-learning system providing virtual experience",
"abstract": "This paper introduces immersive e-learning system which provides vivid learning experience using augmented reality(AR) technology. This system gives illusion that participants feel as if they are in foreign environment by synthesizing images of participants, virtual environment, foreign-language speakers in real-time. Furthermore, surrounding virtual environment reacts to the behavior of each participant including student, local teacher, remote teacher. The system has been installed along with 10 scenarios at 14 public elementary schools and conducted during regular class time. This paper presents our motivations for the system development, a detailed design, and its contents.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces immersive e-learning system which provides vivid learning experience using augmented reality(AR) technology. This system gives illusion that participants feel as if they are in foreign environment by synthesizing images of participants, virtual environment, foreign-language speakers in real-time. Furthermore, surrounding virtual environment reacts to the behavior of each participant including student, local teacher, remote teacher. The system has been installed along with 10 scenarios at 14 public elementary schools and conducted during regular class time. This paper presents our motivations for the system development, a detailed design, and its contents.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces immersive e-learning system which provides vivid learning experience using augmented reality(AR) technology. This system gives illusion that participants feel as if they are in foreign environment by synthesizing images of participants, virtual environment, foreign-language speakers in real-time. Furthermore, surrounding virtual environment reacts to the behavior of each participant including student, local teacher, remote teacher. The system has been installed along with 10 scenarios at 14 public elementary schools and conducted during regular class time. This paper presents our motivations for the system development, a detailed design, and its contents.",
"fno": "05643591",
"keywords": [
"Augmented Reality",
"Computer Aided Instruction",
"Educational Institutions",
"Real Time Systems",
"Software Engineering",
"Immersive E Learning System",
"Virtual Experience",
"Augmented Reality",
"Illusion System",
"Public Elementary School",
"Software Development",
"Foreign Language Speakers",
"Virtual Environment",
"Image Segmentation",
"Cameras",
"Virtual Reality",
"Software",
"Gesture Recognition",
"Hardware",
"Rendering Computer Graphics",
"MR AR For Art",
"Cultural Heritage",
"Or Education And Training Primary Keyword",
"Distributed And Collaborative MR AR"
],
"authors": [
{
"affiliation": "Electronics and Telecommunications Research Institute, Korea",
"fullName": "SuWoong Lee",
"givenName": "SuWoong",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Electronics and Telecommunications Research Institute, Korea",
"fullName": "Jong-gook Ko",
"givenName": "Jong-gook",
"surname": "Ko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Electronics and Telecommunications Research Institute, Korea",
"fullName": "Seokbin Kang",
"givenName": "Seokbin",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Electronics and Telecommunications Research Institute, Korea",
"fullName": "Junsuk Lee",
"givenName": "Junsuk",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "249-250",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-9343-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05643590",
"articleId": "12OmNs0kyvh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05643592",
"articleId": "12OmNAOKnUG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmu/2017/31/0/08330112",
"title": "Clash tanks: An investigation of virtual and augmented reality gaming experience",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2017/08330112/12OmNB8TU7d",
"parentPublication": {
"id": "proceedings/icmu/2017/31/0",
"title": "2017 Tenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2011/0372/0/06122644",
"title": "Constructing immersive virtual space for HAI with photos",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122644/12OmNC8dgco",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714051",
"title": "Augmenting Immersive Telepresence Experience with a Virtual Body",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714051/1B0Y0I5xWyk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a408",
"title": "Immersive Virtual Reality Environments: a proposal to enhance preservice teacher’s communicative competences",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a408/1FUUcqD273a",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2019/4050/0/08809591",
"title": "Immersive Gastronomic Experience with Distributed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2019/08809591/1cI62dVXsB2",
"parentPublication": {
"id": "proceedings/wevr/2019/4050/0",
"title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-2/2019/2850/0/285000a104",
"title": "Virtual Reality for Maritime Archaeology in 2.5D: A Virtual Dive on a Flute Wreck of 1659 in Iceland",
"doi": null,
"abstractUrl": "/proceedings-article/iv-2/2019/285000a104/1cMEQxS0ZfG",
"parentPublication": {
"id": "proceedings/iv-2/2019/2850/0",
"title": "2019 23rd International Conference in Information Visualization – Part II",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09206143",
"title": "Spatial Presence, Performance, and Behavior between Real, Remote, and Virtual Immersive Environments",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09206143/1npxM6fDN7i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a086",
"title": "Visualization of similarity queries in an immersive virtual reality environment",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a086/1rSR9H8wsE0",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811000",
"title": "Virtual Heliodon: Spatially Augmented Reality for Architectural Daylighting Design",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811000/1t0I5sIRprW",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a312",
"title": "Virtual Tourism Immersive Experience System",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a312/1vg8oAGjjhe",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNC3Xhik",
"title": "2011 IEEE/ACM 15th International Symposium on Distributed Simulation and Real Time Applications",
"acronym": "ds-rt",
"groupId": "1000218",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxWLTln",
"doi": "10.1109/DS-RT.2011.37",
"title": "Some Implications of Eye Gaze Behavior and Perception for the Design of Immersive Telecommunication Systems",
"normalizedTitle": "Some Implications of Eye Gaze Behavior and Perception for the Design of Immersive Telecommunication Systems",
"abstract": "A feature of standard video-mediated Communication systems (VMC) is that participants see into each other's spaces from the viewpoint of a camera. Consequently, participants' capacity to use the spatially-based resources that exist in co-located settings (eg the production and comprehension of pointing and eye gaze direction) can be compromised. Whilst positioning cameras close to displays, or switching or interpolating between multiple cameras to provide appropriately aligned views can reduce this problem, an alternative paradigm is the use of immersive projection technology to locate participants within an immersive collaborative virtual environment (ICVE), in which remote participants appear as 3D graphical representations. Two approaches toward representation of remote participants in ICVEs have been studied: embodied avatars animated using participants' tracked body motion, and vision-based techniques that reconstruct 3D models from multiple streams of live video input. Drawing on empirical evaluations of an avatar-based ICVE system that both captures and displays eye-movement, together with an examination of previous research into gaze, we provide a specification of gaze practices and the cues used in the perception of gaze that should be supported in ICVEs. We delineate some of the challenges for vision-based ICVE and discuss the potential for combining different approaches in the development of such systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A feature of standard video-mediated Communication systems (VMC) is that participants see into each other's spaces from the viewpoint of a camera. Consequently, participants' capacity to use the spatially-based resources that exist in co-located settings (eg the production and comprehension of pointing and eye gaze direction) can be compromised. Whilst positioning cameras close to displays, or switching or interpolating between multiple cameras to provide appropriately aligned views can reduce this problem, an alternative paradigm is the use of immersive projection technology to locate participants within an immersive collaborative virtual environment (ICVE), in which remote participants appear as 3D graphical representations. Two approaches toward representation of remote participants in ICVEs have been studied: embodied avatars animated using participants' tracked body motion, and vision-based techniques that reconstruct 3D models from multiple streams of live video input. Drawing on empirical evaluations of an avatar-based ICVE system that both captures and displays eye-movement, together with an examination of previous research into gaze, we provide a specification of gaze practices and the cues used in the perception of gaze that should be supported in ICVEs. We delineate some of the challenges for vision-based ICVE and discuss the potential for combining different approaches in the development of such systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A feature of standard video-mediated Communication systems (VMC) is that participants see into each other's spaces from the viewpoint of a camera. Consequently, participants' capacity to use the spatially-based resources that exist in co-located settings (eg the production and comprehension of pointing and eye gaze direction) can be compromised. Whilst positioning cameras close to displays, or switching or interpolating between multiple cameras to provide appropriately aligned views can reduce this problem, an alternative paradigm is the use of immersive projection technology to locate participants within an immersive collaborative virtual environment (ICVE), in which remote participants appear as 3D graphical representations. Two approaches toward representation of remote participants in ICVEs have been studied: embodied avatars animated using participants' tracked body motion, and vision-based techniques that reconstruct 3D models from multiple streams of live video input. Drawing on empirical evaluations of an avatar-based ICVE system that both captures and displays eye-movement, together with an examination of previous research into gaze, we provide a specification of gaze practices and the cues used in the perception of gaze that should be supported in ICVEs. We delineate some of the challenges for vision-based ICVE and discuss the potential for combining different approaches in the development of such systems.",
"fno": "06051786",
"keywords": [
"Eye",
"Video Communication",
"Eye Gaze Behavior",
"Eye Gaze Perception",
"Immersive Telecommunication Systems",
"Video Mediated Communication Systems",
"VMC",
"Spatially Based Resources",
"ICVE System",
"Immersive Projection Technology",
"3 D Graphical Representations",
"Vision Based Techniques",
"Cameras",
"Avatars",
"Three Dimensional Displays",
"Face",
"Tracking",
"Communications Technology",
"Educational Institutions",
"Gaze",
"Perception",
"Immersive Telecommunication",
"Virtual Reality",
"Videoconferencing",
"Eye Tracking",
"Vision Based Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "John P. Rae",
"givenName": "John P.",
"surname": "Rae",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "William Steptoe",
"givenName": "William",
"surname": "Steptoe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "David J. Roberts",
"givenName": "David J.",
"surname": "Roberts",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ds-rt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "108-114",
"year": "2011",
"issn": "1550-6525",
"isbn": "978-1-4577-1643-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06051785",
"articleId": "12OmNCfSqHu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06051787",
"articleId": "12OmNzyp63w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04811003",
"title": "Eye Tracking for Avatar Eye Gaze Control During Object-Focused Multiparty Interaction in Immersive Collaborative Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811003/12OmNvDqszn",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811013",
"title": "Communicating Eye-gaze Across a Distance: Comparing an Eye-gaze enabled Immersive Collaborative Virtual Environment, Aligned Video Conferencing, and Being Together",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811013/12OmNvUsooA",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2017/4283/0/4283a350",
"title": "Pholder: An Eye-Gaze Assisted Reading Application on Android",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2017/4283a350/12OmNz2kqfc",
"parentPublication": {
"id": "proceedings/sitis/2017/4283/0",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2006/2697/0/26970070",
"title": "Comparison of head gaze and head and eye gaze within an immersive environment",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2006/26970070/12OmNzFdt6h",
"parentPublication": {
"id": "proceedings/ds-rt/2006/2697/0",
"title": "Distributed Simulation and Real Time Applications, IEEE/ACM International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926555",
"title": "Gaze Tracking and Object Recognition from Eye Images",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926555/12OmNzvz6Lc",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446494",
"title": "Real-Time 3D Face Reconstruction and Gaze Tracking for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446494/13bd1eSlytf",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446215",
"title": "Gaze Guidance in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446215/13bd1gJ1v0y",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c221",
"title": "Unraveling Human Perception of Facial Aging Using Eye Gaze",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c221/17D45WwsQ8l",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797852",
"title": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2020/1054/0/09191571",
"title": "Automated Filtering of Eye Gaze Metrics from Dynamic Areas of Interest",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2020/09191571/1n0IyGDlxPq",
"parentPublication": {
"id": "proceedings/iri/2020/1054/0",
"title": "2020 IEEE 21st International Conference on Information Reuse and Integration for Data Science (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgTT89hni",
"doi": "10.1109/VR55154.2023.00016",
"title": "RemoteTouch: Enhancing Immersive 3D Video Communication with Hand Touch",
"normalizedTitle": "RemoteTouch: Enhancing Immersive 3D Video Communication with Hand Touch",
"abstract": "Recent research advance has significantly improved the visual real-ism of immersive 3D video communication. In this work we present a method to further enhance this immersive experience by adding the hand touch capability (“remote hand clapping”). In our system, each meeting participant sits in front of a large screen with haptic feedback. The local participant can reach his hand out to the screen and perform hand clapping with the remote participant as if the two participants were only separated by a virtual glass. A key challenge in emulating the remote hand touch is the realistic rendering of the participant's hand and arm as the hand touches the screen. When the hand is very close to the screen, the RGBD data required for realistic rendering is no longer available. To tackle this challenge, we present a dual representation of the user's hand. Our dual representation not only preserves the high-quality rendering usually found in recent image-based rendering systems but also allows the hand to reach to the screen. This is possible because the dual representation includes both an image-based model and a 3D geometry-based model, with the latter driven by a hand skeleton tracked by a side view camera. In addition, the dual representation provides a distance-based fusion of the image-based and 3D geometry-based models as the hand moves closer to the screen. The result is that the image-based and 3D geometry-based models mutually enhance each other, leading to realistic and seamless rendering. Our experiments demonstrate that our method provides consistent hand contact experience between remote users and improves the immersive experience of 3D video communication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent research advance has significantly improved the visual real-ism of immersive 3D video communication. In this work we present a method to further enhance this immersive experience by adding the hand touch capability (“remote hand clapping”). In our system, each meeting participant sits in front of a large screen with haptic feedback. The local participant can reach his hand out to the screen and perform hand clapping with the remote participant as if the two participants were only separated by a virtual glass. A key challenge in emulating the remote hand touch is the realistic rendering of the participant's hand and arm as the hand touches the screen. When the hand is very close to the screen, the RGBD data required for realistic rendering is no longer available. To tackle this challenge, we present a dual representation of the user's hand. Our dual representation not only preserves the high-quality rendering usually found in recent image-based rendering systems but also allows the hand to reach to the screen. This is possible because the dual representation includes both an image-based model and a 3D geometry-based model, with the latter driven by a hand skeleton tracked by a side view camera. In addition, the dual representation provides a distance-based fusion of the image-based and 3D geometry-based models as the hand moves closer to the screen. The result is that the image-based and 3D geometry-based models mutually enhance each other, leading to realistic and seamless rendering. Our experiments demonstrate that our method provides consistent hand contact experience between remote users and improves the immersive experience of 3D video communication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent research advance has significantly improved the visual real-ism of immersive 3D video communication. In this work we present a method to further enhance this immersive experience by adding the hand touch capability (“remote hand clapping”). In our system, each meeting participant sits in front of a large screen with haptic feedback. The local participant can reach his hand out to the screen and perform hand clapping with the remote participant as if the two participants were only separated by a virtual glass. A key challenge in emulating the remote hand touch is the realistic rendering of the participant's hand and arm as the hand touches the screen. When the hand is very close to the screen, the RGBD data required for realistic rendering is no longer available. To tackle this challenge, we present a dual representation of the user's hand. Our dual representation not only preserves the high-quality rendering usually found in recent image-based rendering systems but also allows the hand to reach to the screen. This is possible because the dual representation includes both an image-based model and a 3D geometry-based model, with the latter driven by a hand skeleton tracked by a side view camera. In addition, the dual representation provides a distance-based fusion of the image-based and 3D geometry-based models as the hand moves closer to the screen. The result is that the image-based and 3D geometry-based models mutually enhance each other, leading to realistic and seamless rendering. Our experiments demonstrate that our method provides consistent hand contact experience between remote users and improves the immersive experience of 3D video communication.",
"fno": "481500a001",
"keywords": [
"Solid Modeling",
"Visualization",
"Three Dimensional Displays",
"Tracking",
"Immersive Experience",
"Glass",
"User Interfaces",
"Human Centered Computing Collaborative And Social Computing",
"Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality"
],
"authors": [
{
"affiliation": "Microsoft Research Asia",
"fullName": "Yizhong Zhang",
"givenName": "Yizhong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,Microsoft Research Asia",
"fullName": "Zhiqi Li",
"givenName": "Zhiqi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia",
"fullName": "Sicheng Xu",
"givenName": "Sicheng",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia",
"fullName": "Chong Li",
"givenName": "Chong",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia",
"fullName": "Jiaolong Yang",
"givenName": "Jiaolong",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia",
"fullName": "Xin Tong",
"givenName": "Xin",
"surname": "Tong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia",
"fullName": "Baining Guo",
"givenName": "Baining",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-10",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgTPfJ2cE",
"name": "pvr202348150-010108425s1-mm_481500a001.zip",
"size": "21.1 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108425s1-mm_481500a001.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500z045",
"articleId": "1MNgzLntRAc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a011",
"articleId": "1MNgWRk76sU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2009/3791/0/3791a029",
"title": "Dynamic Hand Gesture Tracking and Recognition for Real-Time Immersive Virtual Object Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2009/3791a029/12OmNBpEeL1",
"parentPublication": {
"id": "proceedings/cw/2009/3791/0",
"title": "2009 International Conference on CyberWorlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444807",
"title": "Is the rubber hand illusion induced by immersive virtual reality?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444807/12OmNx4yvDy",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2006/2746/0/274600177",
"title": "A Study of Collaborative Dancing in Tele-immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2006/274600177/12OmNx7G5W3",
"parentPublication": {
"id": "proceedings/ism/2006/2746/0",
"title": "Eighth IEEE International Symposium on Multimedia (ISM'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a568",
"title": "Vibrating tilt platform enhancing immersive experience in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a568/1CJcYpd2qNq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a364",
"title": "A Study on the Presentation of Immersive Mobile Live Stream Cultural Landscape from the Perspective of Computer-Mediated Communication",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a364/1H2pmlYXL44",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a807",
"title": "Touching The Droid: Understanding and Improving Touch Precision With Mobile Devices in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a807/1JrR8xUGjpm",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a555",
"title": "Enhancing Participation Experience in VR Live Concerts by Improving Motions of Virtual Audience Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a555/1pyswu13B4Y",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a261",
"title": "Cloud mobile display and interaction framework of virtual reality 3D scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a261/1vg816YtVjG",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a451",
"title": "The Owl: Immersive Telepresence Communication for Hybrid Conferences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a451/1yeQG4fi6Dm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a075",
"title": "Immersive Experience Prototyping: Using Mixed Reality to Integrate Real Devices in Virtual Simulated Contexts to Prototype Experiences with Mobile Apps",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a075/1yfxIU5uhR6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyoiYVn",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBO3Kkf",
"doi": "10.1109/ICVRV.2015.51",
"title": "Real-Time 3D Video Acquisition and Auto-Stereoscopic Display End-to-End Algorithm Based on Tiled Multi-projectors",
"normalizedTitle": "Real-Time 3D Video Acquisition and Auto-Stereoscopic Display End-to-End Algorithm Based on Tiled Multi-projectors",
"abstract": "In order to display the real-time 3D video with more stereoscopic vision, this paper presents a novel end-to-end auto-stereoscopic tiled-projection display algorithm including dual-view video streaming acquisition, compression, transmission, and visualization. On the server side, it gets 3D video captured by dual-camera or read from dual-view 3D video files. Then, the dual-view images are transcoded into MPEG-2 /H.264 data by the Intel Quick Sync Video engine, and distributed to all rendering clients through the real-time transport protocol. On the client side, a self-calibrated method based on a slope voting policy is designed to eliminate the dual-viewpoint perspective distortion. After performing the geometric calibration and luminance correction, the dual-view images are interleaved on the optical display screen with a two-dimensional projection resolution of 3584 × 1536 pixels. Experimental results have validated that the projection system can bring viewers much better glasses-free stereoscopic vision at above 21 × 2 frames per second.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to display the real-time 3D video with more stereoscopic vision, this paper presents a novel end-to-end auto-stereoscopic tiled-projection display algorithm including dual-view video streaming acquisition, compression, transmission, and visualization. On the server side, it gets 3D video captured by dual-camera or read from dual-view 3D video files. Then, the dual-view images are transcoded into MPEG-2 /H.264 data by the Intel Quick Sync Video engine, and distributed to all rendering clients through the real-time transport protocol. On the client side, a self-calibrated method based on a slope voting policy is designed to eliminate the dual-viewpoint perspective distortion. After performing the geometric calibration and luminance correction, the dual-view images are interleaved on the optical display screen with a two-dimensional projection resolution of 3584 × 1536 pixels. Experimental results have validated that the projection system can bring viewers much better glasses-free stereoscopic vision at above 21 × 2 frames per second.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to display the real-time 3D video with more stereoscopic vision, this paper presents a novel end-to-end auto-stereoscopic tiled-projection display algorithm including dual-view video streaming acquisition, compression, transmission, and visualization. On the server side, it gets 3D video captured by dual-camera or read from dual-view 3D video files. Then, the dual-view images are transcoded into MPEG-2 /H.264 data by the Intel Quick Sync Video engine, and distributed to all rendering clients through the real-time transport protocol. On the client side, a self-calibrated method based on a slope voting policy is designed to eliminate the dual-viewpoint perspective distortion. After performing the geometric calibration and luminance correction, the dual-view images are interleaved on the optical display screen with a two-dimensional projection resolution of 3584 × 1536 pixels. Experimental results have validated that the projection system can bring viewers much better glasses-free stereoscopic vision at above 21 × 2 frames per second.",
"fno": "7673a318",
"keywords": [
"Three Dimensional Displays",
"Streaming Media",
"Acceleration",
"Rendering Computer Graphics",
"Real Time Systems",
"Stereo Image Processing",
"Servers",
"Tiled Projection Auto Stereoscopic Display",
"3 D Video Capture",
"Streaming",
"Intel Quick Sync Video"
],
"authors": [
{
"affiliation": null,
"fullName": "Huayuan Guo",
"givenName": "Huayuan",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kaihuai Qin",
"givenName": "Kaihuai",
"surname": "Qin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Feng Sun",
"givenName": "Feng",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "318-323",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7673-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7673a312",
"articleId": "12OmNAkWvJL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7673a324",
"articleId": "12OmNyrIaC7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2008/2570/0/04607477",
"title": "Frame concealment algorithm for stereoscopic video using motion vector sharing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607477/12OmNBOCWrU",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/3/01326651",
"title": "MPEG-4 based stereoscopic video sequences encoder",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326651/12OmNBTs7vG",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2015/7343/0/07314285",
"title": "A Simple Objective Method for Automatic Error Detection in Stereoscopic 3D Video",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314285/12OmNC3FG9L",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2006/0366/0/04037063",
"title": "End-to-End Stereoscopic Video Streaming System",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2006/04037063/12OmNx19k2Z",
"parentPublication": {
"id": "proceedings/icme/2006/0366/0",
"title": "2006 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a073",
"title": "Joint Subspace Stabilization for Stereoscopic Video",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a073/12OmNxT56Af",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a515",
"title": "Subjective Crosstalk Assessment Methodology for Auto-stereoscopic Displays",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a515/12OmNzICEDS",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012099",
"title": "MVC based scalable codec enhancing frame-compatible stereoscopic video",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012099/12OmNzVGcCN",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446222",
"title": "A Method of View-Dependent Stereoscopic Projection on Curved Screen",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446222/13bd1gCd7Sx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b655",
"title": "Warping-Based Stereoscopic 3D Video Retargeting With Depth Remapping",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b655/18j8LvV2AJG",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ubi-media/2019/2820/0/282000a140",
"title": "An Adaptive Stereoscopic Video Streaming Mechanism Using MV-HEVC and 3D-HEVC Technologies",
"doi": null,
"abstractUrl": "/proceedings-article/ubi-media/2019/282000a140/1iESfgV01ry",
"parentPublication": {
"id": "proceedings/ubi-media/2019/2820/0",
"title": "2019 Twelfth International Conference on Ubi-Media Computing (Ubi-Media)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKisa",
"title": "2018 IEEE International Symposium on Multimedia (ISM)",
"acronym": "ism",
"groupId": "1001094",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WrVfZo",
"doi": "10.1109/ISM.2018.00016",
"title": "Edge-Assisted Rendering of 360° Videos Streamed to Head-Mounted Virtual Reality",
"normalizedTitle": "Edge-Assisted Rendering of 360° Videos Streamed to Head-Mounted Virtual Reality",
"abstract": "Over the past years, 360° video streaming is getting popular. Watching these videos with Head-Mounted Displays (HMDs), also known as Virtual Reality (VR) headsets, gives more immersive experience than using traditional planar monitors. To fulfill a real immersive experience, there are several challenges, such as high bandwidth consumption, latency-sensitive, and heterogeneous HMD devices. In this paper, we propose an edge-assisted 360° video streaming system, which leverages edge servers to render viewports for viewers of 360° videos. We formulate an optimization problem to determine which HMD clients should be served by the edge server. We design an algorithm to solve this problem, and implement a real testbed as a proof-of-concept. The resulting edge-assisted 360° video streaming system is extensively evaluated with a public 360° viewing dataset. Leveraging edge servers, we reduce the bandwidth usage and computational workload on HMD clients. Moreover, lower network latency is achieved. The evaluation results show that compared to current 360° video streaming platforms, our edge-assisted rendering platform: (i) saves up to 62% in bandwidth consumption, (ii) achieves higher viewing quality, (iii) reduces the computation workload for those lightweight HMDs, and (iv) saves the battery life of HMD clients.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Over the past years, 360° video streaming is getting popular. Watching these videos with Head-Mounted Displays (HMDs), also known as Virtual Reality (VR) headsets, gives more immersive experience than using traditional planar monitors. To fulfill a real immersive experience, there are several challenges, such as high bandwidth consumption, latency-sensitive, and heterogeneous HMD devices. In this paper, we propose an edge-assisted 360° video streaming system, which leverages edge servers to render viewports for viewers of 360° videos. We formulate an optimization problem to determine which HMD clients should be served by the edge server. We design an algorithm to solve this problem, and implement a real testbed as a proof-of-concept. The resulting edge-assisted 360° video streaming system is extensively evaluated with a public 360° viewing dataset. Leveraging edge servers, we reduce the bandwidth usage and computational workload on HMD clients. Moreover, lower network latency is achieved. The evaluation results show that compared to current 360° video streaming platforms, our edge-assisted rendering platform: (i) saves up to 62% in bandwidth consumption, (ii) achieves higher viewing quality, (iii) reduces the computation workload for those lightweight HMDs, and (iv) saves the battery life of HMD clients.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Over the past years, 360° video streaming is getting popular. Watching these videos with Head-Mounted Displays (HMDs), also known as Virtual Reality (VR) headsets, gives more immersive experience than using traditional planar monitors. To fulfill a real immersive experience, there are several challenges, such as high bandwidth consumption, latency-sensitive, and heterogeneous HMD devices. In this paper, we propose an edge-assisted 360° video streaming system, which leverages edge servers to render viewports for viewers of 360° videos. We formulate an optimization problem to determine which HMD clients should be served by the edge server. We design an algorithm to solve this problem, and implement a real testbed as a proof-of-concept. The resulting edge-assisted 360° video streaming system is extensively evaluated with a public 360° viewing dataset. Leveraging edge servers, we reduce the bandwidth usage and computational workload on HMD clients. Moreover, lower network latency is achieved. The evaluation results show that compared to current 360° video streaming platforms, our edge-assisted rendering platform: (i) saves up to 62% in bandwidth consumption, (ii) achieves higher viewing quality, (iii) reduces the computation workload for those lightweight HMDs, and (iv) saves the battery life of HMD clients.",
"fno": "685700a044",
"keywords": [
"Distributed Processing",
"Helmet Mounted Displays",
"Optimisation",
"Rendering Computer Graphics",
"Video Streaming",
"Virtual Reality",
"Heterogeneous HMD Devices",
"Edge Server",
"Edge Assisted Rendering Platform",
"Head Mounted Virtual Reality",
"Head Mounted Displays",
"Virtual Reality Headsets",
"Immersive Experience",
"Planar Monitors",
"360 Video Streaming Platforms",
"Edge Assisted 360 Video Streaming System",
"Latency Sensitive",
"Videos",
"Servers",
"Resists",
"Bandwidth",
"Streaming Media",
"Rendering Computer Graphics",
"Encoding",
"Edge Computing Tiled Streaming Head Mounted Display Resource Allocation Omnidirectional Videos"
],
"authors": [
{
"affiliation": null,
"fullName": "Wen-Chih Lo",
"givenName": "Wen-Chih",
"surname": "Lo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chih-Yuan Huang",
"givenName": "Chih-Yuan",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cheng-Hsin Hsu",
"givenName": "Cheng-Hsin",
"surname": "Hsu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ism",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "44-51",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6857-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "685700a036",
"articleId": "17D45WaTkdm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "685700a052",
"articleId": "17D45XeKguI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2016/4571/0/4571a583",
"title": "Viewport-Adaptive Encoding and Streaming of 360-Degree Video for Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a583/12OmNzsJ7Ig",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486537",
"title": "A Subjective Study of Viewer Navigation Behaviors When Watching 360-Degree Videos on Computers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486537/14jQfTvagGm",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2018/3227/0/08480355",
"title": "Optimizing 360° Video Streaming to Head-Mounted Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2018/08480355/17D45VsBU5i",
"parentPublication": {
"id": "proceedings/percomw/2018/3227/0",
"title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2022/0876/0/087600a281",
"title": "Head Movement-aware MPEG-DASH SRD-based 360° Video VR Streaming System over Wireless Network",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2022/087600a281/1FHqcfLbws0",
"parentPublication": {
"id": "proceedings/wowmom/2022/0876/0",
"title": "2022 IEEE 23rd International Symposium on a World of Wireless, Mobile and Multimedia Networks (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a274",
"title": "Rate-Adaptive Streaming of 360-Degree Videos with Head-Motion-Aware Viewport Margins",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a274/1GvddU2H1Pq",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2022/7172/0/717200a121",
"title": "Semantic-Aware View Prediction for 360-Degree Videos at the 5G Edge",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2022/717200a121/1KaHLcFruNO",
"parentPublication": {
"id": "proceedings/ism/2022/7172/0",
"title": "2022 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifip-networking/2019/16/0/08999460",
"title": "Advancing user quality of experience in 360-degree video streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ifip-networking/2019/08999460/1hHLyJf1thC",
"parentPublication": {
"id": "proceedings/ifip-networking/2019/16/0",
"title": "2019 IFIP Networking Conference (IFIP Networking)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a082",
"title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a371",
"title": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a371/1qpzCZXhpS0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09487520",
"title": "Saliency Computation for Virtual Cinematography in 360° Videos",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09487520/1vg3jOq7WvK",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1o56xuliEpi",
"title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)",
"acronym": "bigmm",
"groupId": "1808144",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1o56xQvqN5C",
"doi": "10.1109/BigMM50055.2020.00035",
"title": "A Survey of Volumetric Content Streaming Approaches",
"normalizedTitle": "A Survey of Volumetric Content Streaming Approaches",
"abstract": "Volumetric content is an important enabler for a wide range of applications such as immersive real-time 3D communications and virtual reality content viewing with interactive parallax. While nowadays there is more and more hardware that captures and presents 3D representations of the world, streaming these representations, known as volumetric content, is a key problem to be addressed. Major challenges are related to the transfer of large amounts of unstructured 3D data over bandwidth-limited networks, instant response to users' behavior, i.e. latency compensation, as well as computational complexity at both the server and client devices. To provide an overview of studies conducted in the field of volumetric content streaming, we research relevant literature, summarize different streaming schemes related to this focus. This paper provides a discussion of the challenges of volumetric content streaming, and an overview of the representative volumetric content streaming approaches proposed in the literature to date. Future directions and areas requiring further research are also discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volumetric content is an important enabler for a wide range of applications such as immersive real-time 3D communications and virtual reality content viewing with interactive parallax. While nowadays there is more and more hardware that captures and presents 3D representations of the world, streaming these representations, known as volumetric content, is a key problem to be addressed. Major challenges are related to the transfer of large amounts of unstructured 3D data over bandwidth-limited networks, instant response to users' behavior, i.e. latency compensation, as well as computational complexity at both the server and client devices. To provide an overview of studies conducted in the field of volumetric content streaming, we research relevant literature, summarize different streaming schemes related to this focus. This paper provides a discussion of the challenges of volumetric content streaming, and an overview of the representative volumetric content streaming approaches proposed in the literature to date. Future directions and areas requiring further research are also discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volumetric content is an important enabler for a wide range of applications such as immersive real-time 3D communications and virtual reality content viewing with interactive parallax. While nowadays there is more and more hardware that captures and presents 3D representations of the world, streaming these representations, known as volumetric content, is a key problem to be addressed. Major challenges are related to the transfer of large amounts of unstructured 3D data over bandwidth-limited networks, instant response to users' behavior, i.e. latency compensation, as well as computational complexity at both the server and client devices. To provide an overview of studies conducted in the field of volumetric content streaming, we research relevant literature, summarize different streaming schemes related to this focus. This paper provides a discussion of the challenges of volumetric content streaming, and an overview of the representative volumetric content streaming approaches proposed in the literature to date. Future directions and areas requiring further research are also discussed.",
"fno": "09232568",
"keywords": [
"Client Server Systems",
"Computational Complexity",
"Computational Geometry",
"Real Time Systems",
"Video Streaming",
"Virtual Reality",
"Computational Complexity",
"Latency Compensation",
"User Behavior",
"Bandwidth Limited Networks",
"Interactive Parallax",
"Immersive Real Time 3 D Communications",
"Representative Volumetric Content Streaming Approaches",
"Streaming Schemes",
"Unstructured 3 D Data",
"Virtual Reality Content Viewing",
"Three Dimensional Displays",
"Streaming Media",
"Media",
"Rendering Computer Graphics",
"Bandwidth",
"Quality Of Experience",
"Hardware",
"Point Cloud",
"Streaming",
"Volumetric Video",
"Inter Active Media"
],
"authors": [
{
"affiliation": "Technical University of Darmstadt,Multimedia Communications Lab,Darmstadt,Germany",
"fullName": "Yassin Alkhalili",
"givenName": "Yassin",
"surname": "Alkhalili",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Darmstadt,Multimedia Communications Lab,Darmstadt,Germany",
"fullName": "Tobias Meuser",
"givenName": "Tobias",
"surname": "Meuser",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Darmstadt,Multimedia Communications Lab,Darmstadt,Germany",
"fullName": "Ralf Steinmetz",
"givenName": "Ralf",
"surname": "Steinmetz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bigmm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "191-199",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9325-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09232668",
"articleId": "1o56yFp1s6Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09232647",
"articleId": "1o56AY0bwuk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icnc/2015/6959/0/07069329",
"title": "Content placement for video streaming over cellular networks",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2015/07069329/12OmNAKM010",
"parentPublication": {
"id": "proceedings/icnc/2015/6959/0",
"title": "2015 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itcc/2001/1062/0/10620005",
"title": "Reactive and Proactive Approaches to Media Streaming: From Scalable Coding to Content Delivery Networks",
"doi": null,
"abstractUrl": "/proceedings-article/itcc/2001/10620005/12OmNCbU3bT",
"parentPublication": {
"id": "proceedings/itcc/2001/1062/0",
"title": "Proceedings International Conference on Information Technology: Coding and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icndc/2012/4832/0/06386696",
"title": "Live Streaming with Content Centric Networking",
"doi": null,
"abstractUrl": "/proceedings-article/icndc/2012/06386696/12OmNz61dvJ",
"parentPublication": {
"id": "proceedings/icndc/2012/4832/0",
"title": "2012 Third International Conference on Networking and Distributed Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012028",
"title": "Distributed & adaptive HTTP streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012028/12OmNzX6cpb",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2018/4725/0/08449767",
"title": "QoE Degradation Attack in Dynamic Adaptive Streaming Over ICN",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2018/08449767/13bd1fWcuDb",
"parentPublication": {
"id": "proceedings/wowmom/2018/4725/0",
"title": "2018 IEEE 19th International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08269373",
"title": "Gaze-Aware Streaming Solutions for the Next Generation of Mobile VR Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08269373/13rRUIIVlcR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2018/6857/0/685700a089",
"title": "HTTP/2-Based Streaming Solutions for Tiled Omnidirectional Videos",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2018/685700a089/17D45We0UCp",
"parentPublication": {
"id": "proceedings/ism/2018/6857/0",
"title": "2018 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2022/1332/0/09687168",
"title": "Survey on Reinforcement Learning Approaches for Cache-Assisted Video Streaming System",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2022/09687168/1AtQ3BgZjTG",
"parentPublication": {
"id": "proceedings/icoin/2022/1332/0",
"title": "2022 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a173",
"title": "CaV3: Cache-assisted Viewport Adaptive Volumetric Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a173/1MNgV4ZxSQ8",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09106055",
"title": "Towards View-Aware Adaptive Streaming of Holographic Content",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09106055/1kwqzP7ns64",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1olHxhzZTC8",
"title": "2020 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1olHxrrZ43e",
"doi": "10.1109/CW49994.2020.00011",
"title": "High Performance Texture Streaming and Rendering of Large Textured 3D Cities",
"normalizedTitle": "High Performance Texture Streaming and Rendering of Large Textured 3D Cities",
"abstract": "We introduce a novel, high performing, bandwidth-aware texture streaming system for progressive texturing of buildings in large 3D cities, with optional texture pre-processing. We seek to maintain high and consistent texture streaming performance across different city datasets, and to address the high memory binding latency in hardware virtual textures. We adopt the sparse partially-resident image to cache mesh textures at runtime and propose to allocate memory persistently, based on mesh visibility weightings and estimated GPU bandwidth. We also retain high quality rendering by minimizing texture pop-ins when transitioning between texture mipmaps. We evaluate our texture streaming system on large city datasets, including a tile-based dataset with 56K large atlases and a dataset containing 5.7M individual textures. Results indicate fast and robust streaming and rendering performance with minimal pop-in artifacts suitable for real-time rendering of large 3D cities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a novel, high performing, bandwidth-aware texture streaming system for progressive texturing of buildings in large 3D cities, with optional texture pre-processing. We seek to maintain high and consistent texture streaming performance across different city datasets, and to address the high memory binding latency in hardware virtual textures. We adopt the sparse partially-resident image to cache mesh textures at runtime and propose to allocate memory persistently, based on mesh visibility weightings and estimated GPU bandwidth. We also retain high quality rendering by minimizing texture pop-ins when transitioning between texture mipmaps. We evaluate our texture streaming system on large city datasets, including a tile-based dataset with 56K large atlases and a dataset containing 5.7M individual textures. Results indicate fast and robust streaming and rendering performance with minimal pop-in artifacts suitable for real-time rendering of large 3D cities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a novel, high performing, bandwidth-aware texture streaming system for progressive texturing of buildings in large 3D cities, with optional texture pre-processing. We seek to maintain high and consistent texture streaming performance across different city datasets, and to address the high memory binding latency in hardware virtual textures. We adopt the sparse partially-resident image to cache mesh textures at runtime and propose to allocate memory persistently, based on mesh visibility weightings and estimated GPU bandwidth. We also retain high quality rendering by minimizing texture pop-ins when transitioning between texture mipmaps. We evaluate our texture streaming system on large city datasets, including a tile-based dataset with 56K large atlases and a dataset containing 5.7M individual textures. Results indicate fast and robust streaming and rendering performance with minimal pop-in artifacts suitable for real-time rendering of large 3D cities.",
"fno": "649700a017",
"keywords": [
"Data Visualisation",
"Graphics Processing Units",
"Image Texture",
"Rendering Computer Graphics",
"High Performance Texture Streaming",
"Textured 3 D Cities",
"Bandwidth Aware Texture",
"Optional Texture Pre Processing",
"City Datasets",
"High Memory Binding",
"Hardware Virtual Textures",
"Mesh Textures",
"Mesh Visibility Weightings",
"High Quality Rendering",
"Texture Mipmaps",
"Texture Streaming System",
"Tile Based Dataset",
"Robust Streaming",
"Texture Pop Ins",
"Texture Streaming Performance",
"Three Dimensional Displays",
"Runtime",
"Urban Areas",
"Streaming Media",
"Rendering Computer Graphics",
"Real Time Systems",
"Hardware",
"Real Time Rendering",
"Texture Streaming",
"Texture Atlas",
"3 D Cities"
],
"authors": [
{
"affiliation": "Fraunhofer Singapore,Singapore",
"fullName": "Alex Zhang",
"givenName": "Alex",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer Singapore,Singapore",
"fullName": "Kan Chen",
"givenName": "Kan",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,Fraunhofer IDM@NTU,Singapore",
"fullName": "Henry Johan",
"givenName": "Henry",
"surname": "Johan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,Fraunhofer Singapore,Singapore",
"fullName": "Marius Erdt",
"givenName": "Marius",
"surname": "Erdt",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "17-24",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6497-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "649700a009",
"articleId": "1olHyuI1o6Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "649700a025",
"articleId": "1olHy0jO4GA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vv/2002/7641/0/76410115",
"title": "Accelerating Volume Rendering with Texture Hulls",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2002/76410115/12OmNB6D70H",
"parentPublication": {
"id": "proceedings/vv/2002/7641/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720359",
"title": "Texture-Based Wireframe Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720359/12OmNwCaCvX",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/2/3682b575",
"title": "Rapid Texture-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682b575/12OmNx7G5VW",
"parentPublication": {
"id": "esiat/2009/3682/2",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2010/4198/1/4198a007",
"title": "Haptic Texture Rendering Using Single Texture Image",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2010/4198a007/12OmNxbmSBq",
"parentPublication": {
"id": "proceedings/iscid/2010/4198/1",
"title": "Computational Intelligence and Design, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/1/00754002",
"title": "Rendering Of Texture On 3D Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754002/12OmNxxNbPS",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/1",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532798",
"title": "View-dependent rendering of multiresolution texture-atlases",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532798/12OmNyYDDCK",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciap/1999/0040/0/00401055",
"title": "Texture Extraction from Photographs and Rendering with Dynamic Texture Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/1999/00401055/12OmNz61drx",
"parentPublication": {
"id": "proceedings/iciap/1999/0040/0",
"title": "Image Analysis and Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660028",
"title": "View-Dependent Rendering of Multiresolution Texture-Atlases",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660028/12OmNzXWZDD",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300032",
"title": "Chameleon: An Interactive Texture-based Rendering Framework for Visualizing Three-dimensional Vector Fields",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300032/12OmNzh5z0U",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacific-graphics/2010/4205/0/4205a014",
"title": "Space-Optimized Texture Atlases for 3D Scenes with Per-polygon Textures",
"doi": null,
"abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a014/12OmNzmtWw0",
"parentPublication": {
"id": "proceedings/pacific-graphics/2010/4205/0",
"title": "Pacific Conference on Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNsbGvCU",
"title": "2017 IEEE 16th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"acronym": "icci*cc",
"groupId": "1000097",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAOKnSC",
"doi": "10.1109/ICCI-CC.2017.8109784",
"title": "Estimation of biomarkers for autism and its co-morbidities using resting state EEG",
"normalizedTitle": "Estimation of biomarkers for autism and its co-morbidities using resting state EEG",
"abstract": "Autism Spectrum Disorder(ASD) is a collection of heterogeneous disorders with prevalent cognitive and behavioral abnormalities. ASD is generally considered a life-long disability occurring as a stand-alone disorder but it occurs with possible co-morbid conditions such as Attention Deficit Hyperactivity Disorder (ADHD), epilepsy, Obsessive-Compulsive Disorder (OCD), Anxiety etc., Electroencephalography (EEG) studies have been identified as one of the most widely used tool for assessing the cognitive functions. Literature suggests strong evidences of stable pattern of EEG associated with Autism Spectrum Disorder. But the understanding of the subtle variations between various co-morbidities and the pathophysiology behind it needs appropriate signal processing routines. Hence, this work focuses on the identification of electrophysiological biomarkers from the acquired EEG signals of low-functioning autistic children to distinguish between the various co-morbidities of autism. Results show that the sub band power and coherence parameters estimated from segmented resting state EEG waveforms are capable of differentiating the various subgroups under consideration. The identified biomarkers can thus act as supportive tools for the physician in clinically assessing the Autistic children of different groups and to define the various training approaches for children who differ widely.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Autism Spectrum Disorder(ASD) is a collection of heterogeneous disorders with prevalent cognitive and behavioral abnormalities. ASD is generally considered a life-long disability occurring as a stand-alone disorder but it occurs with possible co-morbid conditions such as Attention Deficit Hyperactivity Disorder (ADHD), epilepsy, Obsessive-Compulsive Disorder (OCD), Anxiety etc., Electroencephalography (EEG) studies have been identified as one of the most widely used tool for assessing the cognitive functions. Literature suggests strong evidences of stable pattern of EEG associated with Autism Spectrum Disorder. But the understanding of the subtle variations between various co-morbidities and the pathophysiology behind it needs appropriate signal processing routines. Hence, this work focuses on the identification of electrophysiological biomarkers from the acquired EEG signals of low-functioning autistic children to distinguish between the various co-morbidities of autism. Results show that the sub band power and coherence parameters estimated from segmented resting state EEG waveforms are capable of differentiating the various subgroups under consideration. The identified biomarkers can thus act as supportive tools for the physician in clinically assessing the Autistic children of different groups and to define the various training approaches for children who differ widely.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Autism Spectrum Disorder(ASD) is a collection of heterogeneous disorders with prevalent cognitive and behavioral abnormalities. ASD is generally considered a life-long disability occurring as a stand-alone disorder but it occurs with possible co-morbid conditions such as Attention Deficit Hyperactivity Disorder (ADHD), epilepsy, Obsessive-Compulsive Disorder (OCD), Anxiety etc., Electroencephalography (EEG) studies have been identified as one of the most widely used tool for assessing the cognitive functions. Literature suggests strong evidences of stable pattern of EEG associated with Autism Spectrum Disorder. But the understanding of the subtle variations between various co-morbidities and the pathophysiology behind it needs appropriate signal processing routines. Hence, this work focuses on the identification of electrophysiological biomarkers from the acquired EEG signals of low-functioning autistic children to distinguish between the various co-morbidities of autism. Results show that the sub band power and coherence parameters estimated from segmented resting state EEG waveforms are capable of differentiating the various subgroups under consideration. The identified biomarkers can thus act as supportive tools for the physician in clinically assessing the Autistic children of different groups and to define the various training approaches for children who differ widely.",
"fno": "08109784",
"keywords": [
"Electroencephalography",
"Autism",
"Electrodes",
"Biomarkers",
"Coherence",
"Epilepsy",
"Protocols",
"Autism",
"Co Morbidities",
"EEG",
"Resting State",
"Biomarkers"
],
"authors": [
{
"affiliation": "Department of Biomedical Engineering, SSN College of Engineering, Chennai, India",
"fullName": "K. Vishnu Priya",
"givenName": "K. Vishnu",
"surname": "Priya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Biomedical Engineering, SSN College of Engineering, Chennai, India",
"fullName": "A. Kavitha",
"givenName": "A.",
"surname": "Kavitha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icci*cc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "431-437",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-0771-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08109783",
"articleId": "12OmNzVGcEH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08109785",
"articleId": "12OmNvUaNkH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2016/1269/0/07760073",
"title": "Autism Barta — A smart device based automated autism screening tool for Bangladesh",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760073/12OmNAlNiL1",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726798",
"title": "Considerations in Autism therapy using robotics",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726798/12OmNvm6VGY",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2013/0820/0/06632634",
"title": "Autism Spectrum Disorder children interaction skills measurement using computer games",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2013/06632634/12OmNwswg44",
"parentPublication": {
"id": "proceedings/cgames/2013/0820/0",
"title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itng/2007/2776/0/04151694",
"title": "Exploration of Autism Expert Systems",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2007/04151694/12OmNzhELlO",
"parentPublication": {
"id": "proceedings/itng/2007/2776/0",
"title": "2007 4th International Conference on Information Technology New Generations",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2012/02/mex2012020052",
"title": "Incorporating a Robot into an Autism Therapy Team",
"doi": null,
"abstractUrl": "/magazine/ex/2012/02/mex2012020052/13rRUIJcWsP",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020265",
"title": "ASDCLAIMS: Twitter Dataset of Claims on Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020265/1KfSOPvS0vu",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2019/1337/0/133700a151",
"title": "Analysis of Temporal Relationships between ASD and Brain Activity through EEG and Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2019/133700a151/1eEUMerWJ0I",
"parentPublication": {
"id": "proceedings/iri/2019/1337/0",
"title": "2019 IEEE 20th International Conference on Information Reuse and Integration for Data Science (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2019/1419/0/09146086",
"title": "Cognitive Attention in Autism using Virtual Reality Learning Tool",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2019/09146086/1lFJdBX68Ks",
"parentPublication": {
"id": "proceedings/icci*cc/2019/1419/0",
"title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313270",
"title": "EEG Based Depression Recognition by Combining Functional Brain Network and Traditional Biomarkers",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313270/1qmg9EBgOOc",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2020/1974/0/09411531",
"title": "Autism Spectrum Disorder Detection in Toddlers for Early Diagnosis Using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2020/09411531/1taF6KpeD04",
"parentPublication": {
"id": "proceedings/csde/2020/1974/0",
"title": "2020 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBNM94a",
"title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwCJOQ9",
"doi": "10.1109/ICALT.2014.177",
"title": "VLSS -- Virtual Learning and Social Stories for Children with Autism",
"normalizedTitle": "VLSS -- Virtual Learning and Social Stories for Children with Autism",
"abstract": "This paper presents the design, implementation and educational use of a virtual learning environment which supports children with Autism Spectrum Disorders (ASD) to learn to solve social problems presented in the format of Social Stories. The pilot evaluation revealed that the environment has the potential to be a beneficial and easy-to-use educational tool for teaching social problem solving to children with ASD.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents the design, implementation and educational use of a virtual learning environment which supports children with Autism Spectrum Disorders (ASD) to learn to solve social problems presented in the format of Social Stories. The pilot evaluation revealed that the environment has the potential to be a beneficial and easy-to-use educational tool for teaching social problem solving to children with ASD.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents the design, implementation and educational use of a virtual learning environment which supports children with Autism Spectrum Disorders (ASD) to learn to solve social problems presented in the format of Social Stories. The pilot evaluation revealed that the environment has the potential to be a beneficial and easy-to-use educational tool for teaching social problem solving to children with ASD.",
"fno": "4038a606",
"keywords": [
"Variable Speed Drives",
"Autism",
"Educational Institutions",
"Problem Solving",
"Avatars",
"Social Problem Solving",
"Virtual Learning Environments",
"Autism Spectrum Disorders",
"Social Stories"
],
"authors": [
{
"affiliation": null,
"fullName": "Christina Volioti",
"givenName": "Christina",
"surname": "Volioti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thrasyvoulos Tsiatsos",
"givenName": "Thrasyvoulos",
"surname": "Tsiatsos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sophia Mavropoulou",
"givenName": "Sophia",
"surname": "Mavropoulou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Charalampos Karagiannidis",
"givenName": "Charalampos",
"surname": "Karagiannidis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "606-610",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4038-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4038a601",
"articleId": "12OmNrkjVij",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4038a611",
"articleId": "12OmNBTs7v2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ichi/2013/5089/0/5089a484",
"title": "Can NAO Robot Improve Eye-Gaze Attention of Children with High Functioning Autism?",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2013/5089a484/12OmNAoDifg",
"parentPublication": {
"id": "proceedings/ichi/2013/5089/0",
"title": "2013 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726798",
"title": "Considerations in Autism therapy using robotics",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726798/12OmNvm6VGY",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948448",
"title": "[Poster] An augmented and virtual reality system for training autistic children",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948448/12OmNyKa5Zf",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328405",
"title": "Buddy: A Virtual Life Coaching System for Children and Adolescents with High Functioning Autism",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328405/17D45Wuc374",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0",
"title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798032",
"title": "Teachers' Views on how to use Virtual Reality to Instruct Children and Adolescents Diagnosed with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798032/1cJ0YBL70AM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925474",
"title": "Computational Modeling of Psycho-physiological Arousal and Social Initiation of children with Autism in Interventions through Full-Body Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925474/1fHGE1f2lNe",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2020/02/08998323",
"title": "Virtual Avatar-Based Life Coaching for Children With Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/magazine/co/2020/02/08998323/1hrH6aow132",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscv/2020/8041/0/09204125",
"title": "Augmented reality for children with Autism Spectrum Disorder - A systematic review",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/2020/09204125/1nmi7vlqm9q",
"parentPublication": {
"id": "proceedings/iscv/2020/8041/0",
"title": "2020 International Conference on Intelligent Systems and Computer Vision (ISCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a243",
"title": "Augmented Reality and Autism Spectrum Disorder Rehabilitation: Scoping review",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a243/1qpzAvoH5K0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a761",
"title": "Developing Computational Thinking for Children with Autism using a Serious Game",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a761/1rSRclkCpzO",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvDqsDN",
"title": "2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"acronym": "avss",
"groupId": "1001307",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxA3Z9v",
"doi": "10.1109/AVSS.2017.8078544",
"title": "3D-AD: 3D-autism dataset for repetitive behaviours with kinect sensor",
"normalizedTitle": "3D-AD: 3D-autism dataset for repetitive behaviours with kinect sensor",
"abstract": "Autism spectrum disorders (ASD) is a disorder that affects communication, social skills or behaviours of some people. Children or adults with ASD often have some common repetitive behaviours or self-stimulatory behaviours. These behaviours usually refer to specific behaviours such as flapping, rocking, spinning, etc. This work investigates these behaviours and provides a benchmark dataset for researchers. In our knowledge, this dataset is the first 3D-dataset available online1 in the area of 3D recognitions of complex and repetitive behaviours of autistic people. The 3D-Autism Dataset (3D-AD) is captured with Kinect sensor. We explore different categories of autistic repetitive behaviours: static and dynamic ones, simple and complex ones. Experiments have been done using dynamic time warping to detect these behaviours.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Autism spectrum disorders (ASD) is a disorder that affects communication, social skills or behaviours of some people. Children or adults with ASD often have some common repetitive behaviours or self-stimulatory behaviours. These behaviours usually refer to specific behaviours such as flapping, rocking, spinning, etc. This work investigates these behaviours and provides a benchmark dataset for researchers. In our knowledge, this dataset is the first 3D-dataset available online1 in the area of 3D recognitions of complex and repetitive behaviours of autistic people. The 3D-Autism Dataset (3D-AD) is captured with Kinect sensor. We explore different categories of autistic repetitive behaviours: static and dynamic ones, simple and complex ones. Experiments have been done using dynamic time warping to detect these behaviours.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Autism spectrum disorders (ASD) is a disorder that affects communication, social skills or behaviours of some people. Children or adults with ASD often have some common repetitive behaviours or self-stimulatory behaviours. These behaviours usually refer to specific behaviours such as flapping, rocking, spinning, etc. This work investigates these behaviours and provides a benchmark dataset for researchers. In our knowledge, this dataset is the first 3D-dataset available online1 in the area of 3D recognitions of complex and repetitive behaviours of autistic people. The 3D-Autism Dataset (3D-AD) is captured with Kinect sensor. We explore different categories of autistic repetitive behaviours: static and dynamic ones, simple and complex ones. Experiments have been done using dynamic time warping to detect these behaviours.",
"fno": "08078544",
"keywords": [
"Skeleton",
"Face",
"Three Dimensional Displays",
"Legged Locomotion",
"Feature Extraction",
"Autism",
"Heuristic Algorithms"
],
"authors": [
{
"affiliation": "Aix-Marseille University, LSIS-UMR CNRS 7296, 163 Avenue of Luminy, Cedex 9, Marseille 13288, France",
"fullName": "Omar Rihawi",
"givenName": "Omar",
"surname": "Rihawi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aix-Marseille University, LSIS-UMR CNRS 7296, 163 Avenue of Luminy, Cedex 9, Marseille 13288, France",
"fullName": "Djamal Merad",
"givenName": "Djamal",
"surname": "Merad",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aix-Marseille University, LSIS-UMR CNRS 7296, 163 Avenue of Luminy, Cedex 9, Marseille 13288, France",
"fullName": "Jean-luc Damoiseaux",
"givenName": "Jean-luc",
"surname": "Damoiseaux",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "avss",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2939-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08078543",
"articleId": "12OmNB0X8wp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08078545",
"articleId": "12OmNxGja7O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504695",
"title": "Multimodal adaptive social interaction in virtual environment (MASI-VR) for children with Autism spectrum disorders (ASD)",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504695/12OmNwBT1na",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a755",
"title": "Self-Stimulatory Behaviours in the Wild for Autism Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a755/12OmNxHJ9sW",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177518",
"title": "MEBook: Kinect-based self-modeling intervention for children with autism",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177518/12OmNyPQ4F1",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2017/5920/0/08190490",
"title": "Exploring the use of virtual learning environments to support science learning in autistic students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190490/12OmNyaXPMu",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2015/0481/0/07394384",
"title": "Repetitive motion detection for human behavior understanding from video images",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2015/07394384/12OmNyo1o6j",
"parentPublication": {
"id": "proceedings/isspit/2015/0481/0",
"title": "2015 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-iscmht/2017/1023/0/08338597",
"title": "Towards developing a learning tool for children with autism",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-iscmht/2017/08338597/12OmNzV70vf",
"parentPublication": {
"id": "proceedings/iciev-iscmht/2017/1023/0",
"title": "2017 6th International Conference on Informatics, Electronics and Vision & 2017 7th International Symposium in Computational Medical and Health Technology (ICIEV-ISCMHT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493439",
"title": "Puzzle Walk: A Gamified Mobile App to Increase Physical Activity in Adults with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493439/14tNJlStZmA",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2021/2757/0/275700a299",
"title": "Avatarizing Children with Autism Spectrum Disorder into Serious Games for Social Communication Skill Intervention",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2021/275700a299/1AFsrorE4A8",
"parentPublication": {
"id": "proceedings/eitt/2021/2757/0",
"title": "2021 Tenth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714809",
"title": "A Virtual Reality Based System for the Screening and Classification of Autism",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714809/1B2D3XRL9h6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciddt/2020/0367/0/036700a189",
"title": "Design of Humanoid Robot Based Training APP on Social Skills of Children with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/iciddt/2020/036700a189/1wutJvVno3K",
"parentPublication": {
"id": "proceedings/iciddt/2020/0367/0",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAsTgXb",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyaXPMu",
"doi": "10.1109/FIE.2017.8190490",
"title": "Exploring the use of virtual learning environments to support science learning in autistic students",
"normalizedTitle": "Exploring the use of virtual learning environments to support science learning in autistic students",
"abstract": "Autism and Autism Spectrum Disorders (ASD) are general terms for a group of complex disorders of brain development. Autistic children exhibit certain characteristics in varying degrees including difficulties in verbal/non-verbal communication, social interaction and repetitive behaviors. This paper discusses the role of Virtual Learning Environments (VLEs) in helping autistic children learn science and engineering concepts. VLEs are a type cyber learning environments created using Virtual Reality technology; as part of a learning activities, a set of VLEs to teach autistic students concepts in related to the solar system, robotics and density has been developed; assessment results underscore the potential of such VLEs to support science and engineering learning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Autism and Autism Spectrum Disorders (ASD) are general terms for a group of complex disorders of brain development. Autistic children exhibit certain characteristics in varying degrees including difficulties in verbal/non-verbal communication, social interaction and repetitive behaviors. This paper discusses the role of Virtual Learning Environments (VLEs) in helping autistic children learn science and engineering concepts. VLEs are a type cyber learning environments created using Virtual Reality technology; as part of a learning activities, a set of VLEs to teach autistic students concepts in related to the solar system, robotics and density has been developed; assessment results underscore the potential of such VLEs to support science and engineering learning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Autism and Autism Spectrum Disorders (ASD) are general terms for a group of complex disorders of brain development. Autistic children exhibit certain characteristics in varying degrees including difficulties in verbal/non-verbal communication, social interaction and repetitive behaviors. This paper discusses the role of Virtual Learning Environments (VLEs) in helping autistic children learn science and engineering concepts. VLEs are a type cyber learning environments created using Virtual Reality technology; as part of a learning activities, a set of VLEs to teach autistic students concepts in related to the solar system, robotics and density has been developed; assessment results underscore the potential of such VLEs to support science and engineering learning.",
"fno": "08190490",
"keywords": [
"Autism",
"Robots",
"Virtual Environments",
"Three Dimensional Displays",
"Visualization",
"Haptic Interfaces",
"Autism",
"Virtual Learning Environments",
"Cyber Learning"
],
"authors": [
{
"affiliation": "Computer Science, Center for Cyber Physical Systems (CCPS), Oklahoma, State University, Stillwater, USA",
"fullName": "J. Cecil",
"givenName": "J.",
"surname": "Cecil",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Applied Behavioral, Analysis-OK, Anselm Center, Edmond, USA",
"fullName": "Mary Sweet-Darter",
"givenName": "Mary",
"surname": "Sweet-Darter",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stillwater High School and CCPS Soaring Eagle Research, Stillwater, Oklahoma",
"fullName": "Aaron Cecil-Xavier",
"givenName": "Aaron",
"surname": "Cecil-Xavier",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2017",
"issn": null,
"isbn": "978-1-5090-5920-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08190489",
"articleId": "12OmNyKa5Z8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08190491",
"articleId": "12OmNx9WSWO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2011/4346/0/4346a017",
"title": "A Computer Game Based Approach for Increasing Fluency in the Speech of the Autistic Children",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2011/4346a017/12OmNqGRGbO",
"parentPublication": {
"id": "proceedings/icalt/2011/4346/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2011/4589/0/4589a559",
"title": "Developing the Concept of Money by Interactive Computer Games for Autistic Children",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2011/4589a559/12OmNvkpl9A",
"parentPublication": {
"id": "proceedings/ism/2011/4589/0",
"title": "2011 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2010/4217/0/4217a383",
"title": "Increasing Intelligibility in the Speech of the Autistic Children by an Interactive Computer Game",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2010/4217a383/12OmNwEJ0HO",
"parentPublication": {
"id": "proceedings/ism/2010/4217/0",
"title": "2010 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/c5/2008/3115/0/3115a077",
"title": "Evaluation, Training and Measurement System for Autistic Children",
"doi": null,
"abstractUrl": "/proceedings-article/c5/2008/3115a077/12OmNx7ouJd",
"parentPublication": {
"id": "proceedings/c5/2008/3115/0",
"title": "2008 6th International Conference on Creating, Connecting and Collaborating through Computing (C5 '08)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsacw/2014/3578/0/3578a264",
"title": "Towards a User Model for the Design of Adaptive Interfaces for Autistic Users",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2014/3578a264/12OmNzvQHOH",
"parentPublication": {
"id": "proceedings/compsacw/2014/3578/0",
"title": "2014 IEEE 38th International Computer Software and Applications Conference Workshops (COMPSACW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714809",
"title": "A Virtual Reality Based System for the Screening and Classification of Autism",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714809/1B2D3XRL9h6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2019/5686/0/568600a239",
"title": "MeltdownCrisis: Dataset of Autistic Children During Meltdown Crisis",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2019/568600a239/1j9xEe4o8DK",
"parentPublication": {
"id": "proceedings/sitis/2019/5686/0",
"title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2019/1419/0/09146086",
"title": "Cognitive Attention in Autism using Virtual Reality Learning Tool",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2019/09146086/1lFJdBX68Ks",
"parentPublication": {
"id": "proceedings/icci*cc/2019/1419/0",
"title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274031",
"title": "Design and Assessment of Virtual Learning Environments to Support STEM Learning for Autistic Students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274031/1phRSJ3pmZq",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2021/3851/0/09637130",
"title": "Role of Affordance, Visual Density and Other HCC Criteria in Designing Virtual Learning Environments to Support STEM Learning for Autistic Students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2021/09637130/1zuvTJXuBt6",
"parentPublication": {
"id": "proceedings/fie/2021/3851/0",
"title": "2021 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itC",
"title": "2007 4th International Conference on Information Technology New Generations",
"acronym": "itng",
"groupId": "1001685",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzhELlO",
"doi": "10.1109/ITNG.2007.91",
"title": "Exploration of Autism Expert Systems",
"normalizedTitle": "Exploration of Autism Expert Systems",
"abstract": "Social and emotional ties are an important anchor in life and hence a lack of these is a major hurdle in achieving one's dreams and goals. Autism is a Social development disorder that is marked by a distinct lack of social and language skills. It is a disorder rather than an organic disease and hence is difficult to diagnose without a high index of suspicion. Prevalence rates for Autism, in various societies ranges from 1 in 10000 to 15 in 10000. Autism is a socially crippling disorder and can be a huge strain on families caring for these children. The Indian scenario with regard to Autism is still in its nascent stage. There is very limited resource and infrastructure available for these special children. Most Autistic children attend special care schools that provide for children with Cerebral Palsy or Downs Syndrome. Hence there arises a need for developing technological aids designed to help diagnosis and increase parent participation for Autistic children. This led to the development of a Knowledge based screener (KBS) and an intelligent trainer system that can detect all categories of the developmental disorders. Social developmental disorders comprise a wide spectrum. Pervasive developmental and similar disorders constitute this spectrum. This knowledge-based system we have developed will help to detect the presence of these disorders and provide home-based intervention. The main goal of our study is to develop technological aids, which will help Autistic individuals to be identified earlier and initiate early intervention for the management of autism",
"abstracts": [
{
"abstractType": "Regular",
"content": "Social and emotional ties are an important anchor in life and hence a lack of these is a major hurdle in achieving one's dreams and goals. Autism is a Social development disorder that is marked by a distinct lack of social and language skills. It is a disorder rather than an organic disease and hence is difficult to diagnose without a high index of suspicion. Prevalence rates for Autism, in various societies ranges from 1 in 10000 to 15 in 10000. Autism is a socially crippling disorder and can be a huge strain on families caring for these children. The Indian scenario with regard to Autism is still in its nascent stage. There is very limited resource and infrastructure available for these special children. Most Autistic children attend special care schools that provide for children with Cerebral Palsy or Downs Syndrome. Hence there arises a need for developing technological aids designed to help diagnosis and increase parent participation for Autistic children. This led to the development of a Knowledge based screener (KBS) and an intelligent trainer system that can detect all categories of the developmental disorders. Social developmental disorders comprise a wide spectrum. Pervasive developmental and similar disorders constitute this spectrum. This knowledge-based system we have developed will help to detect the presence of these disorders and provide home-based intervention. The main goal of our study is to develop technological aids, which will help Autistic individuals to be identified earlier and initiate early intervention for the management of autism",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Social and emotional ties are an important anchor in life and hence a lack of these is a major hurdle in achieving one's dreams and goals. Autism is a Social development disorder that is marked by a distinct lack of social and language skills. It is a disorder rather than an organic disease and hence is difficult to diagnose without a high index of suspicion. Prevalence rates for Autism, in various societies ranges from 1 in 10000 to 15 in 10000. Autism is a socially crippling disorder and can be a huge strain on families caring for these children. The Indian scenario with regard to Autism is still in its nascent stage. There is very limited resource and infrastructure available for these special children. Most Autistic children attend special care schools that provide for children with Cerebral Palsy or Downs Syndrome. Hence there arises a need for developing technological aids designed to help diagnosis and increase parent participation for Autistic children. This led to the development of a Knowledge based screener (KBS) and an intelligent trainer system that can detect all categories of the developmental disorders. Social developmental disorders comprise a wide spectrum. Pervasive developmental and similar disorders constitute this spectrum. This knowledge-based system we have developed will help to detect the presence of these disorders and provide home-based intervention. The main goal of our study is to develop technological aids, which will help Autistic individuals to be identified earlier and initiate early intervention for the management of autism",
"fno": "04151694",
"keywords": [
"Behavioural Sciences Computing",
"Knowledge Based Systems",
"Social Sciences Computing",
"Autism",
"Expert Systems",
"Social Development Disorder",
"Social Skills",
"Language Skills",
"Knowledge Based Screener",
"Intelligent Trainer System",
"Knowledge Based System",
"Artificial Intelligence",
"Intelligent Gaming System",
"Screening Systems",
"Autism",
"Expert Systems",
"Artificial Intelligence",
"Delay",
"Pediatrics",
"Management Training",
"Knowledge Based Systems",
"Intelligent Systems",
"Medical Diagnostic Imaging",
"Birth Disorders"
],
"authors": [
{
"affiliation": "Tata Consultancy Services Ltd",
"fullName": "Sampathkumar Veeraraghavan",
"givenName": "Sampathkumar",
"surname": "Veeraraghavan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Family Physician Chennai, India",
"fullName": "Karthik Srinivasan",
"givenName": "Karthik",
"surname": "Srinivasan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "itng",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-04-01T00:00:00",
"pubType": "proceedings",
"pages": "261-264",
"year": "2007",
"issn": null,
"isbn": "0-7695-2776-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "27760255",
"articleId": "12OmNA0dMMj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27760271",
"articleId": "12OmNwcUk2m",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icci*cc/2017/0771/0/08109784",
"title": "Estimation of biomarkers for autism and its co-morbidities using resting state EEG",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2017/08109784/12OmNAOKnSC",
"parentPublication": {
"id": "proceedings/icci*cc/2017/0771/0",
"title": "2017 IEEE 16th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2016/6117/0/6117a349",
"title": "Visual Effect on the Odor Identification Ability of Children with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2016/6117a349/12OmNBSjIWR",
"parentPublication": {
"id": "proceedings/ichi/2016/6117/0",
"title": "2016 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2011/4589/0/4589a559",
"title": "Developing the Concept of Money by Interactive Computer Games for Autistic Children",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2011/4589a559/12OmNvkpl9A",
"parentPublication": {
"id": "proceedings/ism/2011/4589/0",
"title": "2011 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2012/1353/0/06462271",
"title": "Work in progress: Using smart mobile tools to enhance autism therapy for children",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2012/06462271/12OmNwlHSVj",
"parentPublication": {
"id": "proceedings/fie/2012/1353/0",
"title": "2012 Frontiers in Education Conference Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2016/4871/0/07916988",
"title": "Measurement of Ocular Movement Abnormality in Pursuit Eye Movement (PEM) of Autism Spectrum Children with Disability",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2016/07916988/12OmNy50g7a",
"parentPublication": {
"id": "proceedings/acit-csi/2016/4871/0",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2017/5920/0/08190490",
"title": "Exploring the use of virtual learning environments to support science learning in autistic students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190490/12OmNyaXPMu",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-iscmht/2017/1023/0/08338597",
"title": "Towards developing a learning tool for children with autism",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-iscmht/2017/08338597/12OmNzV70vf",
"parentPublication": {
"id": "proceedings/iciev-iscmht/2017/1023/0",
"title": "2017 6th International Conference on Informatics, Electronics and Vision & 2017 7th International Symposium in Computational Medical and Health Technology (ICIEV-ISCMHT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2019/1419/0/09146086",
"title": "Cognitive Attention in Autism using Virtual Reality Learning Tool",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2019/09146086/1lFJdBX68Ks",
"parentPublication": {
"id": "proceedings/icci*cc/2019/1419/0",
"title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2020/1974/0/09411531",
"title": "Autism Spectrum Disorder Detection in Toddlers for Early Diagnosis Using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2020/09411531/1taF6KpeD04",
"parentPublication": {
"id": "proceedings/csde/2020/1974/0",
"title": "2020 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciddt/2020/0367/0/036700a189",
"title": "Design of Humanoid Robot Based Training APP on Social Skills of Children with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/iciddt/2020/036700a189/1wutJvVno3K",
"parentPublication": {
"id": "proceedings/iciddt/2020/0367/0",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "14tNJlStZmx",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"acronym": "vs-games",
"groupId": "1002788",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "14tNJuA5rDa",
"doi": "10.1109/VS-Games.2018.8493421",
"title": "Enable an Innovative Prolonged Exposure Therapy of Attention Deficits on Autism Spectrum through Adaptive Virtual Environments",
"normalizedTitle": "Enable an Innovative Prolonged Exposure Therapy of Attention Deficits on Autism Spectrum through Adaptive Virtual Environments",
"abstract": "A prototype of adaptive virtual environments therapy system (AVET) was developed which will enable innovative Virtual Reality (VR)-based therapy approach for children with attention deficit on the autism spectrum. Many systems have successfully used VR in Autism Spectrum Disorders (ASD) therapies. Most of them use VR as an alternative way to conduct therapies by simulating traditional therapies or real-life experiences. The AVET employed VR-exclusive \"impossible experiences\" (e.g., a chair that deforms upon the user's gaze, a transparent human) which are not available in real world. The AVET identifies, influences the user's cognition, and delivers a customized Prolonged Exposure (PE)-style VR therapy for children with attention deficits on the autism spectrum. We conducted a preliminary evaluation to the current AVET prototype with the experts. Based on the interview feedbacks, we anticipate the AVET will have a great potential to deliver innovative and effective ASD attention training therapies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A prototype of adaptive virtual environments therapy system (AVET) was developed which will enable innovative Virtual Reality (VR)-based therapy approach for children with attention deficit on the autism spectrum. Many systems have successfully used VR in Autism Spectrum Disorders (ASD) therapies. Most of them use VR as an alternative way to conduct therapies by simulating traditional therapies or real-life experiences. The AVET employed VR-exclusive \"impossible experiences\" (e.g., a chair that deforms upon the user's gaze, a transparent human) which are not available in real world. The AVET identifies, influences the user's cognition, and delivers a customized Prolonged Exposure (PE)-style VR therapy for children with attention deficits on the autism spectrum. We conducted a preliminary evaluation to the current AVET prototype with the experts. Based on the interview feedbacks, we anticipate the AVET will have a great potential to deliver innovative and effective ASD attention training therapies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A prototype of adaptive virtual environments therapy system (AVET) was developed which will enable innovative Virtual Reality (VR)-based therapy approach for children with attention deficit on the autism spectrum. Many systems have successfully used VR in Autism Spectrum Disorders (ASD) therapies. Most of them use VR as an alternative way to conduct therapies by simulating traditional therapies or real-life experiences. The AVET employed VR-exclusive \"impossible experiences\" (e.g., a chair that deforms upon the user's gaze, a transparent human) which are not available in real world. The AVET identifies, influences the user's cognition, and delivers a customized Prolonged Exposure (PE)-style VR therapy for children with attention deficits on the autism spectrum. We conducted a preliminary evaluation to the current AVET prototype with the experts. Based on the interview feedbacks, we anticipate the AVET will have a great potential to deliver innovative and effective ASD attention training therapies.",
"fno": "08493421",
"keywords": [
"Medical Treatment",
"Variable Speed Drives",
"Autism",
"Training",
"Prototypes",
"Virtual Environments"
],
"authors": [
{
"affiliation": null,
"fullName": "Chao Mei",
"givenName": "Chao",
"surname": "Mei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rongkai Guo",
"givenName": "Rongkai",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vs-games",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7123-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08493420",
"articleId": "14tNJmUlJD3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08493422",
"articleId": "14tNJpmuJlH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icccnt/2013/3926/0/06726798",
"title": "Considerations in Autism therapy using robotics",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726798/12OmNvm6VGY",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2014/4038/0/4038a606",
"title": "VLSS -- Virtual Learning and Social Stories for Children with Autism",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2014/4038a606/12OmNwCJOQ9",
"parentPublication": {
"id": "proceedings/icalt/2014/4038/0",
"title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948448",
"title": "[Poster] An augmented and virtual reality system for training autistic children",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948448/12OmNyKa5Zf",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344621",
"title": "Cognitive state measurement from eye gaze analysis in an intelligent virtual reality driving system for autism intervention",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344621/12OmNyfdOR4",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040711",
"title": "Understanding How Adolescents with Autism Respond to Facial Expressions in Virtual Reality Environments",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040711/13rRUwcAqqf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2017/02/07495013",
"title": "Cognitive Load Measurement in a Virtual Reality-Based Driving System for Autism Intervention",
"doi": null,
"abstractUrl": "/journal/ta/2017/02/07495013/13rRUwhpBMP",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2022/5490/0/10086039",
"title": "Towards adaptive and personalized robotic therapy for children with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2022/10086039/1M66aeTe1Ow",
"parentPublication": {
"id": "proceedings/aciiw/2022/5490/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798032",
"title": "Teachers' Views on how to use Virtual Reality to Instruct Children and Adolescents Diagnosed with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798032/1cJ0YBL70AM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/1/260701a630",
"title": "Application of Reconstructed Phase Space in Autism Intervention",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260701a630/1cYiwZcclRm",
"parentPublication": {
"id": "proceedings/compsac/2019/2607/1",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscv/2020/8041/0/09204125",
"title": "Augmented reality for children with Autism Spectrum Disorder - A systematic review",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/2020/09204125/1nmi7vlqm9q",
"parentPublication": {
"id": "proceedings/iscv/2020/8041/0",
"title": "2020 International Conference on Intelligent Systems and Computer Vision (ISCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1lFJ9Evt0pG",
"title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"acronym": "icci*cc",
"groupId": "1000097",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1lFJdBX68Ks",
"doi": "10.1109/ICCICC46617.2019.9146086",
"title": "Cognitive Attention in Autism using Virtual Reality Learning Tool",
"normalizedTitle": "Cognitive Attention in Autism using Virtual Reality Learning Tool",
"abstract": "Individuals with autism spectrum disorder demonstrate impairments in social functions including difficulties in social interactions, social communication and emotion recognition. They struggle in making eye contact and this hampers their learning due to lack of listening. Virtual reality provides a supportive environment for individuals with autism disorders to learn and practice things safely. In this work, a Virtual reality (VR) platform has been developed which acts as a simulative learning tool to teach and train kids affected with autism spectral disorder. EEG signals of participants during flashcard teaching and VR teaching sessions were acquired and functional connectivity parameters were estimated from the acquired signals. This VR experiment with autistic children indicate that their cognitive ability can be enhanced through constructive teaching by using suitable VR environments. Thus it can be extended for different learning disorders in assisting children to practice things safely without any social fear and in enhancing their interpersonal skills.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Individuals with autism spectrum disorder demonstrate impairments in social functions including difficulties in social interactions, social communication and emotion recognition. They struggle in making eye contact and this hampers their learning due to lack of listening. Virtual reality provides a supportive environment for individuals with autism disorders to learn and practice things safely. In this work, a Virtual reality (VR) platform has been developed which acts as a simulative learning tool to teach and train kids affected with autism spectral disorder. EEG signals of participants during flashcard teaching and VR teaching sessions were acquired and functional connectivity parameters were estimated from the acquired signals. This VR experiment with autistic children indicate that their cognitive ability can be enhanced through constructive teaching by using suitable VR environments. Thus it can be extended for different learning disorders in assisting children to practice things safely without any social fear and in enhancing their interpersonal skills.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Individuals with autism spectrum disorder demonstrate impairments in social functions including difficulties in social interactions, social communication and emotion recognition. They struggle in making eye contact and this hampers their learning due to lack of listening. Virtual reality provides a supportive environment for individuals with autism disorders to learn and practice things safely. In this work, a Virtual reality (VR) platform has been developed which acts as a simulative learning tool to teach and train kids affected with autism spectral disorder. EEG signals of participants during flashcard teaching and VR teaching sessions were acquired and functional connectivity parameters were estimated from the acquired signals. This VR experiment with autistic children indicate that their cognitive ability can be enhanced through constructive teaching by using suitable VR environments. Thus it can be extended for different learning disorders in assisting children to practice things safely without any social fear and in enhancing their interpersonal skills.",
"fno": "09146086",
"keywords": [
"Cognition",
"Computer Aided Instruction",
"Electroencephalography",
"Emotion Recognition",
"Medical Disorders",
"Medical Signal Processing",
"Teaching",
"Virtual Reality",
"Virtual Reality Learning Tool",
"Autism Spectrum Disorder",
"Cognitive Attention",
"Social Fear",
"Suitable VR Environments",
"Constructive Teaching",
"Cognitive Ability",
"VR Experiment",
"Functional Connectivity Parameters",
"Flashcard Teaching",
"Autism Spectral Disorder",
"Simulative Learning Tool",
"Virtual Reality Platform",
"Autism Disorders",
"Eye Contact",
"Emotion Recognition",
"Social Communication",
"Social Interactions",
"Social Functions",
"Electroencephalography",
"Electrodes",
"Education",
"Autism",
"Tools",
"Virtual Reality",
"Coherence",
"Autism",
"Electroencephalography",
"Functional Connectivity",
"Cognitive Ability",
"Learning Disabilities",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Centre for Healthcare Technologies, SSN College of Engineering, Department of Biomedical Engineering",
"fullName": "S Vidhusha",
"givenName": "S",
"surname": "Vidhusha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Centre for Healthcare Technologies, SSN College of Engineering, Department of Biomedical Engineering",
"fullName": "B Divya",
"givenName": "B",
"surname": "Divya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Centre for Healthcare Technologies, SSN College of Engineering, Department of Biomedical Engineering",
"fullName": "A Kavitha",
"givenName": "A",
"surname": "Kavitha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Centre for Healthcare Technologies, SSN College of Engineering, Department of Biomedical Engineering",
"fullName": "R Viswath Narayanan",
"givenName": "R",
"surname": "Viswath Narayanan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Centre for Healthcare Technologies, SSN College of Engineering, Department of Biomedical Engineering",
"fullName": "D Yaamini",
"givenName": "D",
"surname": "Yaamini",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icci*cc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "159-165",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1419-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09146069",
"articleId": "1lFJbjus2gU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09146032",
"articleId": "1lFJeTrDmcE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icci*cc/2017/0771/0/08109784",
"title": "Estimation of biomarkers for autism and its co-morbidities using resting state EEG",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2017/08109784/12OmNAOKnSC",
"parentPublication": {
"id": "proceedings/icci*cc/2017/0771/0",
"title": "2017 IEEE 16th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07760073",
"title": "Autism Barta — A smart device based automated autism screening tool for Bangladesh",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760073/12OmNAlNiL1",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hisb/2011/4407/0/4407a359",
"title": "Autism: A Systems Biology Disease",
"doi": null,
"abstractUrl": "/proceedings-article/hisb/2011/4407a359/12OmNCga1PC",
"parentPublication": {
"id": "proceedings/hisb/2011/4407/0",
"title": "Healthcare Informatics, Imaging and Systems Biology, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726798",
"title": "Considerations in Autism therapy using robotics",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726798/12OmNvm6VGY",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948448",
"title": "[Poster] An augmented and virtual reality system for training autistic children",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948448/12OmNyKa5Zf",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344621",
"title": "Cognitive state measurement from eye gaze analysis in an intelligent virtual reality driving system for autism intervention",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344621/12OmNyfdOR4",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2017/02/07495013",
"title": "Cognitive Load Measurement in a Virtual Reality-Based Driving System for Autism Intervention",
"doi": null,
"abstractUrl": "/journal/ta/2017/02/07495013/13rRUwhpBMP",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493421",
"title": "Enable an Innovative Prolonged Exposure Therapy of Attention Deficits on Autism Spectrum through Adaptive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493421/14tNJuA5rDa",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cic/2019/6739/0/673900a057",
"title": "An Interactive Play Environment on Mathematics and Cognitive Training with Behavioral Tracking for Children with Autism",
"doi": null,
"abstractUrl": "/proceedings-article/cic/2019/673900a057/1hrMfjKbNGE",
"parentPublication": {
"id": "proceedings/cic/2019/6739/0",
"title": "2019 IEEE 5th International Conference on Collaboration and Internet Computing (CIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qpzz6dhLLq",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qpzAvoH5K0",
"doi": "10.1109/AIVR50618.2020.00051",
"title": "Augmented Reality and Autism Spectrum Disorder Rehabilitation: Scoping review",
"normalizedTitle": "Augmented Reality and Autism Spectrum Disorder Rehabilitation: Scoping review",
"abstract": "Currently, augmented reality is integrated into several areas (medical, therapeutic, educational, entertainment,). One of these important areas in which the augmented reality has been incorporated in the field of rehabilitation for autism spectrum disorder (ASD) children, but it does not serve the field well. This report will provide an overview of how to use AR technology in the field of autism spectrum disorder (ASD), from an entertainment perspective to develop their communication skills. Where solutions to the problems suggested in previous research will be discussed and attempted to develop them to reach new hypotheses that serve the use of augmented reality AR to better rehabilitate the behavior of autistic children. The report proposes a future hypothesis and a research plan to solve the problem.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Currently, augmented reality is integrated into several areas (medical, therapeutic, educational, entertainment,). One of these important areas in which the augmented reality has been incorporated in the field of rehabilitation for autism spectrum disorder (ASD) children, but it does not serve the field well. This report will provide an overview of how to use AR technology in the field of autism spectrum disorder (ASD), from an entertainment perspective to develop their communication skills. Where solutions to the problems suggested in previous research will be discussed and attempted to develop them to reach new hypotheses that serve the use of augmented reality AR to better rehabilitate the behavior of autistic children. The report proposes a future hypothesis and a research plan to solve the problem.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Currently, augmented reality is integrated into several areas (medical, therapeutic, educational, entertainment,). One of these important areas in which the augmented reality has been incorporated in the field of rehabilitation for autism spectrum disorder (ASD) children, but it does not serve the field well. This report will provide an overview of how to use AR technology in the field of autism spectrum disorder (ASD), from an entertainment perspective to develop their communication skills. Where solutions to the problems suggested in previous research will be discussed and attempted to develop them to reach new hypotheses that serve the use of augmented reality AR to better rehabilitate the behavior of autistic children. The report proposes a future hypothesis and a research plan to solve the problem.",
"fno": "746300a243",
"keywords": [
"Augmented Reality",
"Handicapped Aids",
"Medical Disorders",
"Patient Rehabilitation",
"Autism Spectrum Disorder Rehabilitation",
"Scoping Review",
"Medical Entertainment",
"Therapeutic Entertainment",
"Educational Entertainment",
"Autism Spectrum Disorder Children",
"ASD",
"Augmented Reality AR",
"Variable Speed Drives",
"Pediatrics",
"Autism",
"Augmented Reality",
"Training",
"Task Analysis",
"Smart Phones",
"Autism Spectrum Disorder ASD",
"Augmented Reality AR",
"And Entertainment"
],
"authors": [
{
"affiliation": "Taibah University, Saudi Arabia,Computer Science Department,Al Madinah,Saudi Arabia",
"fullName": "Hanin A. Almurashi",
"givenName": "Hanin A.",
"surname": "Almurashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Taibah University,Computer Science Department,Saudi Arabia",
"fullName": "Rahma Bouaziz",
"givenName": "Rahma",
"surname": "Bouaziz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "243-246",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7463-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "746300a239",
"articleId": "1qpzADQ7pAY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "746300a247",
"articleId": "1qpzE3VSLlK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/asonam/2014/5877/0/06921609",
"title": "Data-mining twitter and the autism spectrum disorder: A Pilot study",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2014/06921609/12OmNqHItu7",
"parentPublication": {
"id": "proceedings/asonam/2014/5877/0",
"title": "2014 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726798",
"title": "Considerations in Autism therapy using robotics",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726798/12OmNvm6VGY",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d287",
"title": "Learning Visual Attention to Identify People with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d287/12OmNyQ7Ga7",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/10024337",
"title": "Computer-Aided Autism Spectrum Disorder Diagnosis With Behavior Signal Processing",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/10024337/1KaB0oT2ziU",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a625",
"title": "Visual Attention Modeling for Autism Spectrum Disorder by Semantic Features",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a625/1cJ0EqHvcT6",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798032",
"title": "Teachers' Views on how to use Virtual Reality to Instruct Children and Adolescents Diagnosed with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798032/1cJ0YBL70AM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a847",
"title": "Deep Learning Based Multimedia Data Mining for Autism Spectrum Disorder (ASD) Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a847/1gAwWdB8K9G",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscv/2020/8041/0/09204125",
"title": "Augmented reality for children with Autism Spectrum Disorder - A systematic review",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/2020/09204125/1nmi7vlqm9q",
"parentPublication": {
"id": "proceedings/iscv/2020/8041/0",
"title": "2020 International Conference on Intelligent Systems and Computer Vision (ISCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313278",
"title": "Predicting eye movement and fixation patterns on scenic images using Machine Learning for Children with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313278/1qmg2QvsNK8",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2020/1974/0/09411531",
"title": "Autism Spectrum Disorder Detection in Toddlers for Early Diagnosis Using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2020/09411531/1taF6KpeD04",
"parentPublication": {
"id": "proceedings/csde/2020/1974/0",
"title": "2020 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwKoZd0",
"title": "2011 IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications",
"acronym": "trustcom",
"groupId": "1800729",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCga1SI",
"doi": "10.1109/TrustCom.2011.202",
"title": "A Power Law Transformation Predicting Lightness Conditions Based on Skin Color Space Detection",
"normalizedTitle": "A Power Law Transformation Predicting Lightness Conditions Based on Skin Color Space Detection",
"abstract": "This paper presents an improved human-skin-color detection approach in images using power law transformation predicting lightness conditions. Our approach focus on different illumination conditions and complex background via RGB-HSV model. The illumination is adjusted without losing image quality, and skin detection is done in a per pixel fashion after transforming into HSV color space. The experimental results demonstrated that, the proposed technique is able to achieve robust results, where a set of skin color images from ethnic color images from ethnic groups have been detected such as Asian, African and Caucasian.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an improved human-skin-color detection approach in images using power law transformation predicting lightness conditions. Our approach focus on different illumination conditions and complex background via RGB-HSV model. The illumination is adjusted without losing image quality, and skin detection is done in a per pixel fashion after transforming into HSV color space. The experimental results demonstrated that, the proposed technique is able to achieve robust results, where a set of skin color images from ethnic color images from ethnic groups have been detected such as Asian, African and Caucasian.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an improved human-skin-color detection approach in images using power law transformation predicting lightness conditions. Our approach focus on different illumination conditions and complex background via RGB-HSV model. The illumination is adjusted without losing image quality, and skin detection is done in a per pixel fashion after transforming into HSV color space. The experimental results demonstrated that, the proposed technique is able to achieve robust results, where a set of skin color images from ethnic color images from ethnic groups have been detected such as Asian, African and Caucasian.",
"fno": "06120998",
"keywords": [
"Image Colour Analysis",
"Lighting",
"Power Law Transformation",
"Lightness Condition Prediction",
"Skin Color Space Detection",
"Human Skin Color Detection",
"Illumination Condition",
"RGB HSV Model",
"Image Quality",
"Skin Color Image",
"Ethnic Color Image",
"Image Color Analysis",
"Skin",
"Lighting",
"Brightness",
"Colored Noise",
"Humans",
"Color",
"Power Law Transformation",
"Color Space",
"Skin Detection And Morphological Filters"
],
"authors": [
{
"affiliation": null,
"fullName": "Emmanuel Kondela",
"givenName": "Emmanuel",
"surname": "Kondela",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huang Dong Jun",
"givenName": "Huang Dong",
"surname": "Jun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "trustcom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1472-1476",
"year": "2011",
"issn": "2324-898X",
"isbn": "978-1-4577-2135-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06120997",
"articleId": "12OmNzZmZoe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06120999",
"articleId": "12OmNzl3WVm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscsct/2008/3498/2/3498b758",
"title": "Hand's Skin Detection Based on Ellipse Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498b758/12OmNBBzocs",
"parentPublication": {
"id": "proceedings/iscsct/2008/3498/1",
"title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2008/2153/0/04813459",
"title": "Robust hand tracking using a skin tone and depth joint probability model",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813459/12OmNqOffyb",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2009/3813/0/3813a245",
"title": "A Study of the Effect of Illumination Conditions and Color Spaces on Skin Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2009/3813a245/12OmNrkT7FX",
"parentPublication": {
"id": "proceedings/sibgrapi/2009/3813/0",
"title": "2009 XXII Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011868",
"title": "Skin detection with illumination adaptation in single image",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011868/12OmNy50g21",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2009/3931/1/3931a325",
"title": "Multi-pose Face Detection Based on Adaptive Skin Color and Structure Model",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2009/3931a325/12OmNy6qfKi",
"parentPublication": {
"id": "proceedings/cis/2009/3931/1",
"title": "2009 International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2018/5114/0/511401a248",
"title": "Robust Estimation of Skin Pigmentation from Facial Color Images Based on Color Constancy",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2018/511401a248/12OmNzUPpkz",
"parentPublication": {
"id": "proceedings/icmtma/2018/5114/0",
"title": "2018 10th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2008/3278/0/3278a577",
"title": "Hand Detections Based on Invariant Skin-Color Models Constructed Using Linear and Nonlinear Color Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278a577/12OmNzXWZIS",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2004/07/i0862",
"title": "Skin Color-Based Video Segmentation under Time-Varying Illumination",
"doi": null,
"abstractUrl": "/journal/tp/2004/07/i0862/13rRUygT7aa",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2018/6950/0/08538604",
"title": "A New Clustering-based Thresholding Method for Human Skin Segmentation Using HSV Color Space",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2018/08538604/17D45Wuc33K",
"parentPublication": {
"id": "proceedings/iscc/2018/6950/0",
"title": "2018 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150626",
"title": "Illumination-based Transformations Improve Skin Lesion Segmentation in Dermoscopic Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150626/1lPHtWATm2A",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrYlmQD",
"title": "2008 Ninth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing",
"acronym": "snpd",
"groupId": "1001811",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNviZlI7",
"doi": "10.1109/SNPD.2008.145",
"title": "Face Detection Based on Skin Gaussian Model and KL Transform",
"normalizedTitle": "Face Detection Based on Skin Gaussian Model and KL Transform",
"abstract": "For face detection in complex background pictures with single or multiple faces, this paper presents a new face-detection method. In YCbCr chrominance space, the distribution of the skin color is described as a Gaussian model and presented as a likelihood image. Otsu method is used to segment the skin pixels; At the same time, the skin regions are located in the KL skin color space, finally the two segmentation results under two chrominance spaces (YCbCr and KL) are fused at the pixel level. Experimental results show that this approach gets a more robust and accurate performance in detecting faces than in any single color space.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For face detection in complex background pictures with single or multiple faces, this paper presents a new face-detection method. In YCbCr chrominance space, the distribution of the skin color is described as a Gaussian model and presented as a likelihood image. Otsu method is used to segment the skin pixels; At the same time, the skin regions are located in the KL skin color space, finally the two segmentation results under two chrominance spaces (YCbCr and KL) are fused at the pixel level. Experimental results show that this approach gets a more robust and accurate performance in detecting faces than in any single color space.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For face detection in complex background pictures with single or multiple faces, this paper presents a new face-detection method. In YCbCr chrominance space, the distribution of the skin color is described as a Gaussian model and presented as a likelihood image. Otsu method is used to segment the skin pixels; At the same time, the skin regions are located in the KL skin color space, finally the two segmentation results under two chrominance spaces (YCbCr and KL) are fused at the pixel level. Experimental results show that this approach gets a more robust and accurate performance in detecting faces than in any single color space.",
"fno": "3263a522",
"keywords": [
"Skin Gaussian Model Otsu Threshold KL Transform Face Detection"
],
"authors": [
{
"affiliation": null,
"fullName": "Wang Chuan-xu",
"givenName": "Wang",
"surname": "Chuan-xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Li Zuo-yong",
"givenName": "Li",
"surname": "Zuo-yong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "snpd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-08-01T00:00:00",
"pubType": "proceedings",
"pages": "522-525",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3263-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3263a517",
"articleId": "12OmNqNG3dI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3263a526",
"articleId": "12OmNxy4MWo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscsct/2008/3498/2/3498b758",
"title": "Hand's Skin Detection Based on Ellipse Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498b758/12OmNBBzocs",
"parentPublication": {
"id": "proceedings/iscsct/2008/3498/1",
"title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031b954",
"title": "Skin Color Segmentation Based on Improved 2D Otsu and YCgCr",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031b954/12OmNCbU2SR",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdip/2009/3565/0/3565a017",
"title": "A Novel Color Space Creating Method Applied to Skin Color Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icdip/2009/3565a017/12OmNqyUUDU",
"parentPublication": {
"id": "proceedings/icdip/2009/3565/0",
"title": "Digital Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsap/2010/3960/0/3960a162",
"title": "Segmentation Algorithm for Multiple Face Detection for Color Images with Skin Tone Regions",
"doi": null,
"abstractUrl": "/proceedings-article/icsap/2010/3960a162/12OmNvDqsGk",
"parentPublication": {
"id": "proceedings/icsap/2010/3960/0",
"title": "Signal Acquisition and Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iita/2008/3497/1/3497a457",
"title": "Face Tracking in Video Sequences Using Particle Filter Based on Skin Color Model and Facial Contour",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2008/3497a457/12OmNx9WT1h",
"parentPublication": {
"id": "proceedings/iita/2008/3497/3",
"title": "2008 Second International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/2/3987b708",
"title": "Face Detection Technology Based on Skin Color Segmentation and Template Matching",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987b708/12OmNyL0TKD",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/2",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2008/3090/0/30900339",
"title": "Face Detection in Color Images Using AdaBoost Algorithm Based on Skin Color Information",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2008/30900339/12OmNym2c7S",
"parentPublication": {
"id": "proceedings/wkdd/2008/3090/0",
"title": "International Workshop on Knowledge Discovery and Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isccs/2011/4443/0/4443a100",
"title": "Face Detection Based on YCbCr Gaussian Model and KL Transform",
"doi": null,
"abstractUrl": "/proceedings-article/isccs/2011/4443a100/12OmNz4Bduq",
"parentPublication": {
"id": "proceedings/isccs/2011/4443/0",
"title": "Computer Science and Society, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icciis/2010/4260/0/4260a266",
"title": "Research on a Skin Color Detection Algorithm Based on Self-adaptive Skin Color Model",
"doi": null,
"abstractUrl": "/proceedings-article/icciis/2010/4260a266/12OmNzBOij0",
"parentPublication": {
"id": "proceedings/icciis/2010/4260/0",
"title": "Communications and Intelligence Information Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2022/6399/0/639900a544",
"title": "Face Skin Color Detection Method Based on YUV-KL Transform",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2022/639900a544/1LRlsVRDsxq",
"parentPublication": {
"id": "proceedings/aiam/2022/6399/0",
"title": "2022 4th International Conference on Artificial Intelligence and Advanced Manufacturing (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAPBbgw",
"title": "Services Part II, IEEE Congress on",
"acronym": "services-2",
"groupId": "1002948",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwl8GFv",
"doi": "10.1109/SERVICES-2.2008.21",
"title": "Skin Detection on Images with Color Deviation",
"normalizedTitle": "Skin Detection on Images with Color Deviation",
"abstract": "Skin detection is an important pre-process step in WEB image filter. Since color modal is based on pixel-level color information, the performance of skin detection on image with color deviation is poor. Automatic white balance (AWB) is necessary to increase skin detection accuracy. Considering that almost all nude images contain human faces, human faces can be used as reference object to perform AWB. To begin with, this paper makes use of adaboost face detection algorithm to determine face rectangle, then apply a skin detection method immune to illumination to detect skin pixels within the face rectangle. The skin color of face is then used as reference color to perform AWB before skin detection. Experimental results show that the proposed method can increase skin detection precision significantly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Skin detection is an important pre-process step in WEB image filter. Since color modal is based on pixel-level color information, the performance of skin detection on image with color deviation is poor. Automatic white balance (AWB) is necessary to increase skin detection accuracy. Considering that almost all nude images contain human faces, human faces can be used as reference object to perform AWB. To begin with, this paper makes use of adaboost face detection algorithm to determine face rectangle, then apply a skin detection method immune to illumination to detect skin pixels within the face rectangle. The skin color of face is then used as reference color to perform AWB before skin detection. Experimental results show that the proposed method can increase skin detection precision significantly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Skin detection is an important pre-process step in WEB image filter. Since color modal is based on pixel-level color information, the performance of skin detection on image with color deviation is poor. Automatic white balance (AWB) is necessary to increase skin detection accuracy. Considering that almost all nude images contain human faces, human faces can be used as reference object to perform AWB. To begin with, this paper makes use of adaboost face detection algorithm to determine face rectangle, then apply a skin detection method immune to illumination to detect skin pixels within the face rectangle. The skin color of face is then used as reference color to perform AWB before skin detection. Experimental results show that the proposed method can increase skin detection precision significantly.",
"fno": "3313a171",
"keywords": [
"Skin Detection",
"Color Deviation",
"Automatic White Balance"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhiwei Jiang",
"givenName": "Zhiwei",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhaohui Wu",
"givenName": "Zhaohui",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Min Yao",
"givenName": "Min",
"surname": "Yao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "services-2",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-09-01T00:00:00",
"pubType": "proceedings",
"pages": "171-174",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3313-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3313a163",
"articleId": "12OmNyFCvW8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3313a175",
"articleId": "12OmNzGDsJU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icfcc/2009/3591/0/3591a324",
"title": "Some Notes on Accuracy Constraints of Pixel Based Skin Color Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icfcc/2009/3591a324/12OmNBNM8QV",
"parentPublication": {
"id": "proceedings/icfcc/2009/3591/0",
"title": "2009 International Conference on Future Computer and Communication (ICFCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgec/2010/4281/0/4281a687",
"title": "Face Detection Based on Feature Analysis and Edge Detection against Skin Color-like Backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2010/4281a687/12OmNCctfgS",
"parentPublication": {
"id": "proceedings/icgec/2010/4281/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a144",
"title": "Face Detection Method Based on Skin Color and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a144/12OmNCf1DjM",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2008/3391/0/3391a457",
"title": "Multi-View Face Detection Based on AdaBoost and Skin Color",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2008/3391a457/12OmNqNG3dB",
"parentPublication": {
"id": "proceedings/icinis/2008/3391/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2014/5179/0/06850763",
"title": "Facial features detection in color images based on skin color segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850763/12OmNsd6vjP",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsap/2010/3960/0/3960a162",
"title": "Segmentation Algorithm for Multiple Face Detection for Color Images with Skin Tone Regions",
"doi": null,
"abstractUrl": "/proceedings-article/icsap/2010/3960a162/12OmNvDqsGk",
"parentPublication": {
"id": "proceedings/icsap/2010/3960/0",
"title": "Signal Acquisition and Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2009/6325/0/05447254",
"title": "Face Detection Based on AdaBoost and Skin Color",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2009/05447254/12OmNxwENnr",
"parentPublication": {
"id": "proceedings/isise/2009/6325/0",
"title": "2009 Second International Symposium on Information Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicsyn/2011/4482/0/4482a219",
"title": "Face Detection Based on Fuzzy Granulation and Skin Color Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cicsyn/2011/4482a219/12OmNy49sN3",
"parentPublication": {
"id": "proceedings/cicsyn/2011/4482/0",
"title": "Computational Intelligence, Communication Systems and Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459203",
"title": "Incorporating skin color for improved face detection and tracking system",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459203/12OmNzXWZJq",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmwa/2009/3646/0/3646a070",
"title": "Study on Face Detection Algorithm Based on Skin Color Segmentation and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/wmwa/2009/3646a070/12OmNzlD95K",
"parentPublication": {
"id": "proceedings/wmwa/2009/3646/0",
"title": "Web Mining and Web-based Application, Pacific-Asia Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxuFBoE",
"title": "2015 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"acronym": "iciibms",
"groupId": "1811284",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwtn3C7",
"doi": "10.1109/ICIIBMS.2015.7439539",
"title": "Multi-layered, 3D skin phantoms of human skin in the wavelength range 650–850nm",
"normalizedTitle": "Multi-layered, 3D skin phantoms of human skin in the wavelength range 650–850nm",
"abstract": "Skin and bone phantoms become more and more a relevant for many applications. In this paper we present novel three dimensional, multi-layered skin and bone phantoms. The phantoms consist of four layers, representing human skin layers epidermis, dermis, subcutis/muscle and bone. Phantoms for different ethnicities and age groups as well as phantoms representing different regions of the human body have been developed. Additionally, a three dimensional phantom head with real human hair for head hair and eyebrows was constructed. Different material have been examined and estimated in this work. The phantoms matching the same optical properties (reflection, absorption, transmission) as human skin consist of silicone, epoxy resin, coffee and titanium dioxide.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Skin and bone phantoms become more and more a relevant for many applications. In this paper we present novel three dimensional, multi-layered skin and bone phantoms. The phantoms consist of four layers, representing human skin layers epidermis, dermis, subcutis/muscle and bone. Phantoms for different ethnicities and age groups as well as phantoms representing different regions of the human body have been developed. Additionally, a three dimensional phantom head with real human hair for head hair and eyebrows was constructed. Different material have been examined and estimated in this work. The phantoms matching the same optical properties (reflection, absorption, transmission) as human skin consist of silicone, epoxy resin, coffee and titanium dioxide.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Skin and bone phantoms become more and more a relevant for many applications. In this paper we present novel three dimensional, multi-layered skin and bone phantoms. The phantoms consist of four layers, representing human skin layers epidermis, dermis, subcutis/muscle and bone. Phantoms for different ethnicities and age groups as well as phantoms representing different regions of the human body have been developed. Additionally, a three dimensional phantom head with real human hair for head hair and eyebrows was constructed. Different material have been examined and estimated in this work. The phantoms matching the same optical properties (reflection, absorption, transmission) as human skin consist of silicone, epoxy resin, coffee and titanium dioxide.",
"fno": "07439539",
"keywords": [
"Skin",
"Phantoms",
"Optical Refraction",
"Optical Variables Control",
"Optical Imaging",
"Optical Scattering"
],
"authors": [
{
"affiliation": "Institute of Micro Technology and Medical Device Technology (MIMED) Technische Universität München, D-85748 Garching, Germany",
"fullName": "Franziska S. Goerlach",
"givenName": "Franziska S.",
"surname": "Goerlach",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Micro Technology and Medical Device Technology (MIMED) Technische Universität München, D-85748 Garching, Germany",
"fullName": "Nikolai Striffler",
"givenName": "Nikolai",
"surname": "Striffler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Micro Technology and Medical Device Technology (MIMED) Technische Universität München, D-85748 Garching, Germany",
"fullName": "Tobias Lueddemann",
"givenName": "Tobias",
"surname": "Lueddemann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Micro Technology and Medical Device Technology (MIMED) Technische Universität München, D-85748 Garching, Germany",
"fullName": "Tim C. Lueth",
"givenName": "Tim C.",
"surname": "Lueth",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iciibms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "250-256",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-8562-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07439538",
"articleId": "12OmNrJROX1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07439540",
"articleId": "12OmNviZlf8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460057",
"title": "Head posture detection using skin and hair information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460057/12OmNqIhG7i",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019339",
"title": "Automatic skin and hair masking using fully convolutional networks",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019339/12OmNqJq4m9",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2015/7143/0/7143a374",
"title": "Detection and Evaluation on the Spine Tracking Accuracy during Image-Guided Radiation Therapy",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2015/7143a374/12OmNrHSD3n",
"parentPublication": {
"id": "proceedings/icmtma/2015/7143/0",
"title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icacc/2013/5033/0/06686405",
"title": "Design of 3-D Phantoms for Human Carotid Vasculature",
"doi": null,
"abstractUrl": "/proceedings-article/icacc/2013/06686405/12OmNrkBwEL",
"parentPublication": {
"id": "proceedings/icacc/2013/5033/0",
"title": "2013 Third International Conference on Advances in Computing and Communications (ICACC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761885",
"title": "Measuring skin reflectance parameters",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761885/12OmNviZlxw",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260043",
"title": "Realization of Human Skin-like Texture by Emulating Surface Shape Pattern and Elastic Structure",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260043/12OmNxWcH0O",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/his/2009/3745/3/3745c057",
"title": "Fuzzy Mamdani Inference System Skin Detection",
"doi": null,
"abstractUrl": "/proceedings-article/his/2009/3745c057/12OmNyQYtmA",
"parentPublication": {
"id": "proceedings/his/2009/3745/3",
"title": "Hybrid Intelligent Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdcat/2017/3151/0/315101a117",
"title": "Segmentation and Classification of Skin Cancer Melanoma from Skin Lesion Images",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2017/315101a117/12OmNzd7bj6",
"parentPublication": {
"id": "proceedings/pdcat/2017/3151/0",
"title": "2017 18th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2009/3858/0/04810819",
"title": "Measurement of the detection thresholds of hair on human hairy skin using direct vibrotactile stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2009/04810819/12OmNzhna7f",
"parentPublication": {
"id": "proceedings/whc/2009/3858/0",
"title": "World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2020/7397/0/739700a463",
"title": "Pore Detection from Human Skin Image using U-Net",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2020/739700a463/1tGcrlrC7pS",
"parentPublication": {
"id": "proceedings/iiai-aai/2020/7397/0",
"title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxX3uMS",
"title": "Wireless Networks and Information Systems, International Conference on",
"acronym": "wnis",
"groupId": "1003076",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxA3YTf",
"doi": "10.1109/WNIS.2009.29",
"title": "Face Tracking Based on Fusion Skin Color Model and Optical Flow Algorithm",
"normalizedTitle": "Face Tracking Based on Fusion Skin Color Model and Optical Flow Algorithm",
"abstract": "A fusion algorithm based on skin color model and optical flow is proposed to detect and track faces. It uses skin color model to detect faces and adopts optical flow algorithm to estimate the continuity of the video frames and obtain the position of faces in the frames. Taking advantages of the algorithms, the proposed method, in some degrees, gives a solution to the effects of face rotation, partial occlusion, expression variation and illumination on face detecting and face tracking.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A fusion algorithm based on skin color model and optical flow is proposed to detect and track faces. It uses skin color model to detect faces and adopts optical flow algorithm to estimate the continuity of the video frames and obtain the position of faces in the frames. Taking advantages of the algorithms, the proposed method, in some degrees, gives a solution to the effects of face rotation, partial occlusion, expression variation and illumination on face detecting and face tracking.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A fusion algorithm based on skin color model and optical flow is proposed to detect and track faces. It uses skin color model to detect faces and adopts optical flow algorithm to estimate the continuity of the video frames and obtain the position of faces in the frames. Taking advantages of the algorithms, the proposed method, in some degrees, gives a solution to the effects of face rotation, partial occlusion, expression variation and illumination on face detecting and face tracking.",
"fno": "3901a089",
"keywords": [
"Skin Color Model Optical Algorithm Face Detection Fusion Algorithm"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiaogang Zhao",
"givenName": "Xiaogang",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yanbo Hui",
"givenName": "Yanbo",
"surname": "Hui",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wnis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-12-01T00:00:00",
"pubType": "proceedings",
"pages": "89-92",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3901-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3901a085",
"articleId": "12OmNAoUTfw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3901a093",
"articleId": "12OmNzcPAIb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2014/5179/0/06850755",
"title": "Face detection using skin color modeling and geometric feature",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850755/12OmNBh8gTs",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgec/2010/4281/0/4281a687",
"title": "Face Detection Based on Feature Analysis and Edge Detection against Skin Color-like Backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2010/4281a687/12OmNCctfgS",
"parentPublication": {
"id": "proceedings/icgec/2010/4281/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a144",
"title": "Face Detection Method Based on Skin Color and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a144/12OmNCf1DjM",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmlc/2003/7865/5/01260064",
"title": "Real-time human face detection in color image",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2003/01260064/12OmNCgJebQ",
"parentPublication": {
"id": "proceedings/icmlc/2003/7865/1",
"title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services-2/2008/3313/0/3313a171",
"title": "Skin Detection on Images with Color Deviation",
"doi": null,
"abstractUrl": "/proceedings-article/services-2/2008/3313a171/12OmNwl8GFv",
"parentPublication": {
"id": "proceedings/services-2/2008/3313/0",
"title": "Services Part II, IEEE Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicsyn/2011/4482/0/4482a219",
"title": "Face Detection Based on Fuzzy Granulation and Skin Color Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cicsyn/2011/4482a219/12OmNy49sN3",
"parentPublication": {
"id": "proceedings/cicsyn/2011/4482/0",
"title": "Computational Intelligence, Communication Systems and Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/2/3987b708",
"title": "Face Detection Technology Based on Skin Color Segmentation and Template Matching",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987b708/12OmNyL0TKD",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/2",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459203",
"title": "Incorporating skin color for improved face detection and tracking system",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459203/12OmNzXWZJq",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmwa/2009/3646/0/3646a070",
"title": "Study on Face Detection Algorithm Based on Skin Color Segmentation and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/wmwa/2009/3646a070/12OmNzlD95K",
"parentPublication": {
"id": "proceedings/wmwa/2009/3646/0",
"title": "Web Mining and Web-based Application, Pacific-Asia Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2022/6399/0/639900a544",
"title": "Face Skin Color Detection Method Based on YUV-KL Transform",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2022/639900a544/1LRlsVRDsxq",
"parentPublication": {
"id": "proceedings/aiam/2022/6399/0",
"title": "2022 4th International Conference on Artificial Intelligence and Advanced Manufacturing (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvkYx7C",
"title": "Computational Intelligence, Communication Systems and Networks, International Conference on",
"acronym": "cicsyn",
"groupId": "1002865",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy49sN3",
"doi": "10.1109/CICSyN.2011.55",
"title": "Face Detection Based on Fuzzy Granulation and Skin Color Segmentation",
"normalizedTitle": "Face Detection Based on Fuzzy Granulation and Skin Color Segmentation",
"abstract": "Face Detection is the process of determining the face location, size and number. Robust face detection in complex background is a challenging task. In this paper, a novel method based on skin color segmentation and classification with fuzzy information granulation (FIG) for robust and fast face detection in color images is proposed. In this method, firstly we use skin color segmentation to extract face candidates. After normalization of candidate faces, a fuzzy information granulation based classifier is used to classify face or non-face patterns. Experimental results show that this method is effective and fast with high accuracy in detecting faces in color images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Face Detection is the process of determining the face location, size and number. Robust face detection in complex background is a challenging task. In this paper, a novel method based on skin color segmentation and classification with fuzzy information granulation (FIG) for robust and fast face detection in color images is proposed. In this method, firstly we use skin color segmentation to extract face candidates. After normalization of candidate faces, a fuzzy information granulation based classifier is used to classify face or non-face patterns. Experimental results show that this method is effective and fast with high accuracy in detecting faces in color images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Face Detection is the process of determining the face location, size and number. Robust face detection in complex background is a challenging task. In this paper, a novel method based on skin color segmentation and classification with fuzzy information granulation (FIG) for robust and fast face detection in color images is proposed. In this method, firstly we use skin color segmentation to extract face candidates. After normalization of candidate faces, a fuzzy information granulation based classifier is used to classify face or non-face patterns. Experimental results show that this method is effective and fast with high accuracy in detecting faces in color images.",
"fno": "4482a219",
"keywords": [
"Face Detection",
"Fuzzy Information Granulation",
"Classification",
"Skin Color Segmentation"
],
"authors": [
{
"affiliation": null,
"fullName": "Mehrdad Shemshaki",
"givenName": "Mehrdad",
"surname": "Shemshaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Roya Amjadifard",
"givenName": "Roya",
"surname": "Amjadifard",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cicsyn",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "219-224",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4482-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4482a213",
"articleId": "12OmNz2TCCA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4482a225",
"articleId": "12OmNz2TCIP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icgec/2010/4281/0/4281a687",
"title": "Face Detection Based on Feature Analysis and Edge Detection against Skin Color-like Backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2010/4281a687/12OmNCctfgS",
"parentPublication": {
"id": "proceedings/icgec/2010/4281/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a144",
"title": "Face Detection Method Based on Skin Color and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a144/12OmNCf1DjM",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services-2/2008/3313/0/3313a171",
"title": "Skin Detection on Images with Color Deviation",
"doi": null,
"abstractUrl": "/proceedings-article/services-2/2008/3313a171/12OmNwl8GFv",
"parentPublication": {
"id": "proceedings/services-2/2008/3313/0",
"title": "Services Part II, IEEE Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wnis/2009/3901/0/3901a089",
"title": "Face Tracking Based on Fusion Skin Color Model and Optical Flow Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/wnis/2009/3901a089/12OmNxA3YTf",
"parentPublication": {
"id": "proceedings/wnis/2009/3901/0",
"title": "Wireless Networks and Information Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420591",
"title": "Face detection by fuzzy pattern matching",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420591/12OmNxHJ9rY",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2010/4246/0/4246a225",
"title": "Face Detection Using Fuzzy Logic and Skin Color Segmentation in Images",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2010/4246a225/12OmNyvGymP",
"parentPublication": {
"id": "proceedings/icetet/2010/4246/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459203",
"title": "Incorporating skin color for improved face detection and tracking system",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459203/12OmNzXWZJq",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmwa/2009/3646/0/3646a070",
"title": "Study on Face Detection Algorithm Based on Skin Color Segmentation and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/wmwa/2009/3646a070/12OmNzlD95K",
"parentPublication": {
"id": "proceedings/wmwa/2009/3646/0",
"title": "Web Mining and Web-based Application, Pacific-Asia Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1999/06/i0557",
"title": "Face Detection From Color Images Using a Fuzzy Pattern Matching Method",
"doi": null,
"abstractUrl": "/journal/tp/1999/06/i0557/13rRUwbs1Tu",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2022/6399/0/639900a544",
"title": "Face Skin Color Detection Method Based on YUV-KL Transform",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2022/639900a544/1LRlsVRDsxq",
"parentPublication": {
"id": "proceedings/aiam/2022/6399/0",
"title": "2022 4th International Conference on Artificial Intelligence and Advanced Manufacturing (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKiqU",
"title": "2018 IEEE Symposium on Computers and Communications (ISCC)",
"acronym": "iscc",
"groupId": "1000156",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Wuc33K",
"doi": "10.1109/ISCC.2018.8538604",
"title": "A New Clustering-based Thresholding Method for Human Skin Segmentation Using HSV Color Space",
"normalizedTitle": "A New Clustering-based Thresholding Method for Human Skin Segmentation Using HSV Color Space",
"abstract": "Skin detection based on color can be applied in eHealth systems for preventive healthcare and computer-aided diagnosis. These algorithms could be incorporated in acquisition and preprocessing steps of the applications that assist with skincare, as prevention and detection of melanoma. In this paper we present the results of a study that investigated the reduction of the color spectrum in the HSV system for sample-based skin detection of individuals of different ages and ethnicities. The proposed HSV filter reduced the color spectrum by 97.4648{\\%} so as to select candidates for human skin tones. It achieved low sensitivity (54.6333{\\%}) and high specificity (92.6390{\\%}) in human skin detection in color digital images when compared to the performance of other algorithms proposed in the literature. Different from other filters described in the literature which propose a single interval for human skin in the HSV system, this model presents and discusses 13 intervals in the possible spectrum which present a well-defined variation in terms of tone.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Skin detection based on color can be applied in eHealth systems for preventive healthcare and computer-aided diagnosis. These algorithms could be incorporated in acquisition and preprocessing steps of the applications that assist with skincare, as prevention and detection of melanoma. In this paper we present the results of a study that investigated the reduction of the color spectrum in the HSV system for sample-based skin detection of individuals of different ages and ethnicities. The proposed HSV filter reduced the color spectrum by 97.4648{\\%} so as to select candidates for human skin tones. It achieved low sensitivity (54.6333{\\%}) and high specificity (92.6390{\\%}) in human skin detection in color digital images when compared to the performance of other algorithms proposed in the literature. Different from other filters described in the literature which propose a single interval for human skin in the HSV system, this model presents and discusses 13 intervals in the possible spectrum which present a well-defined variation in terms of tone.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Skin detection based on color can be applied in eHealth systems for preventive healthcare and computer-aided diagnosis. These algorithms could be incorporated in acquisition and preprocessing steps of the applications that assist with skincare, as prevention and detection of melanoma. In this paper we present the results of a study that investigated the reduction of the color spectrum in the HSV system for sample-based skin detection of individuals of different ages and ethnicities. The proposed HSV filter reduced the color spectrum by 97.4648{\\%} so as to select candidates for human skin tones. It achieved low sensitivity (54.6333{\\%}) and high specificity (92.6390{\\%}) in human skin detection in color digital images when compared to the performance of other algorithms proposed in the literature. Different from other filters described in the literature which propose a single interval for human skin in the HSV system, this model presents and discusses 13 intervals in the possible spectrum which present a well-defined variation in terms of tone.",
"fno": "08538604",
"keywords": [
"Skin",
"Image Color Analysis",
"Color",
"Mathematical Model",
"Face",
"Sensitivity",
"Clustering",
"Thresholding",
"Skin Detection",
"Color Segmentation",
"HSV"
],
"authors": [
{
"affiliation": "Department of Informatics, Federal Institute Goiano - IF Goiano Ceres, Goias, Brazil",
"fullName": "Rafael Divino Ferreira Feitosa",
"givenName": "Rafael Divino Ferreira",
"surname": "Feitosa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Informatics Federal University of Goias - UFG Goiania, Goias, Brazil",
"fullName": "Anderson da Silva Soares",
"givenName": "Anderson da Silva",
"surname": "Soares",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Sao Paulo and Ribeir˜a Preto Medical School Ribeira Preto, Sao Paulo, Brazil",
"fullName": "Lucas Calabrez Pereyra",
"givenName": "Lucas Calabrez",
"surname": "Pereyra",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iscc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "639-1180",
"year": "2018",
"issn": "1530-1346",
"isbn": "978-1-5386-6950-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08538713",
"articleId": "17D45VTRorP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08538456",
"articleId": "17D45WODar7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2014/5179/0/06850755",
"title": "Face detection using skin color modeling and geometric feature",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850755/12OmNBh8gTs",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgec/2010/4281/0/4281a687",
"title": "Face Detection Based on Feature Analysis and Edge Detection against Skin Color-like Backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2010/4281a687/12OmNCctfgS",
"parentPublication": {
"id": "proceedings/icgec/2010/4281/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a144",
"title": "Face Detection Method Based on Skin Color and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a144/12OmNCf1DjM",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2011/2135/0/06120998",
"title": "A Power Law Transformation Predicting Lightness Conditions Based on Skin Color Space Detection",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2011/06120998/12OmNCga1SI",
"parentPublication": {
"id": "proceedings/trustcom/2011/2135/0",
"title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicn/2014/6929/0/6929a198",
"title": "Human Face Detection in Color Images Using HSV Color Histogram and WLD",
"doi": null,
"abstractUrl": "/proceedings-article/cicn/2014/6929a198/12OmNxxNbP9",
"parentPublication": {
"id": "proceedings/cicn/2014/6929/0",
"title": "2014 International Conference on Computational Intelligence and Communication Networks (CICN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2010/4246/0/4246a225",
"title": "Face Detection Using Fuzzy Logic and Skin Color Segmentation in Images",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2010/4246a225/12OmNyvGymP",
"parentPublication": {
"id": "proceedings/icetet/2010/4246/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icciis/2010/4260/0/4260a266",
"title": "Research on a Skin Color Detection Algorithm Based on Self-adaptive Skin Color Model",
"doi": null,
"abstractUrl": "/proceedings-article/icciis/2010/4260a266/12OmNzBOij0",
"parentPublication": {
"id": "proceedings/icciis/2010/4260/0",
"title": "Communications and Intelligence Information Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459203",
"title": "Incorporating skin color for improved face detection and tracking system",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459203/12OmNzXWZJq",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926593",
"title": "Universal Skin Detection Without Color Information",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926593/12OmNzZEAF0",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmwa/2009/3646/0/3646a070",
"title": "Study on Face Detection Algorithm Based on Skin Color Segmentation and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/wmwa/2009/3646a070/12OmNzlD95K",
"parentPublication": {
"id": "proceedings/wmwa/2009/3646/0",
"title": "Web Mining and Web-based Application, Pacific-Asia Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1y4oEtZzwCQ",
"title": "2021 25th International Conference Information Visualisation (IV)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1y4oI7GF7Pi",
"doi": "10.1109/IV53921.2021.00012",
"title": "Multidimensional Data Visualization for Investigation of Skin Transparency",
"normalizedTitle": "Multidimensional Data Visualization for Investigation of Skin Transparency",
"abstract": "“Skin Transparency” is an important keyword for women of all generations as one of the conditions for beautiful skin. Although, no one has clear definitions of “Skin Transparency”. As it stands, beauty experts are often invited to use visual methods in conducting skin transparency evaluation; however, it has not been still sufficiently clarified which visual properties are related to skin transparency. In this study, we aim to discover the relations between skin image features and sensory evaluations applying real human skin images. Specifically, we investigate “Skin Transparency” by comparing them using the Parallel Coordinate Plots. We observed their complex distributions by the visualization task.",
"abstracts": [
{
"abstractType": "Regular",
"content": "“Skin Transparency” is an important keyword for women of all generations as one of the conditions for beautiful skin. Although, no one has clear definitions of “Skin Transparency”. As it stands, beauty experts are often invited to use visual methods in conducting skin transparency evaluation; however, it has not been still sufficiently clarified which visual properties are related to skin transparency. In this study, we aim to discover the relations between skin image features and sensory evaluations applying real human skin images. Specifically, we investigate “Skin Transparency” by comparing them using the Parallel Coordinate Plots. We observed their complex distributions by the visualization task.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "“Skin Transparency” is an important keyword for women of all generations as one of the conditions for beautiful skin. Although, no one has clear definitions of “Skin Transparency”. As it stands, beauty experts are often invited to use visual methods in conducting skin transparency evaluation; however, it has not been still sufficiently clarified which visual properties are related to skin transparency. In this study, we aim to discover the relations between skin image features and sensory evaluations applying real human skin images. Specifically, we investigate “Skin Transparency” by comparing them using the Parallel Coordinate Plots. We observed their complex distributions by the visualization task.",
"fno": "382700a007",
"keywords": [
"Data Visualisation",
"Feature Extraction",
"Medical Computing",
"Skin",
"Skin Transparency Evaluation",
"Skin Image Features",
"Human Skin Images",
"Multidimensional Data Visualization",
"Parallel Coordinate Plots",
"Visual Methods",
"Visualization",
"Data Visualization",
"Skin",
"Task Analysis",
"Human Skin",
"Transparency",
"Sensory Evaluation",
"Image Analysis",
"Multidimensional Data Visualization"
],
"authors": [
{
"affiliation": "Ochanomizu University,Grad. School of Humanities and Sciences,Tokyo,Japan",
"fullName": "Ami Tochigi",
"givenName": "Ami",
"surname": "Tochigi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ochanomizu University,Grad. School of Humanities and Sciences,Tokyo,Japan",
"fullName": "Takayuki Itoh",
"givenName": "Takayuki",
"surname": "Itoh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "7-12",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3827-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "382700a001",
"articleId": "1y4oK7SJJjq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "382700a013",
"articleId": "1y4oGkV80Qo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icci-cc/2012/2795/0/06311149",
"title": "Design of a cognitive user-support system for skin progress analysis using a smart phone",
"doi": null,
"abstractUrl": "/proceedings-article/icci-cc/2012/06311149/12OmNAS9zxx",
"parentPublication": {
"id": "proceedings/icci-cc/2012/2795/0",
"title": "2012 11th IEEE International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460479",
"title": "Skin detection via linear regression tree",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460479/12OmNBiygA2",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607635",
"title": "Interactive skin condition recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607635/12OmNqBtiOT",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdt/2009/3695/0/3695a059",
"title": "Skin Detection Using Contourlet-Based Texture Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icdt/2009/3695a059/12OmNyvoXkf",
"parentPublication": {
"id": "proceedings/icdt/2009/3695/0",
"title": "2009 Fourth International Conference on Digital Telecommunications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904451",
"title": "Multi-View Design Patterns and Responsive Visualization for Genomics Data",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904451/1H1gfVbEsiA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09006528",
"title": "Computer-Aided Clinical Skin Disease Diagnosis Using CNN and Object Detection Models",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09006528/1hJrRvvdNAs",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300a425",
"title": "RethNet: Object-by-Object Learning for Detecting Facial Skin Problems",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300a425/1i5mBEwMqIM",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313150",
"title": "Learning to Classify Skin Lesions via Self-Training and Self-Paced Learning",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313150/1qmfN0441FK",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a051",
"title": "An Augmented Reality Mobile Application for Skin Lesion Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a051/1rSRcFKjMzu",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl-hcc/2021/4592/0/09576440",
"title": "ChatrEx: Designing Explainable Chatbot Interfaces for Enhancing Usefulness, Transparency, and Trust",
"doi": null,
"abstractUrl": "/proceedings-article/vl-hcc/2021/09576440/1y63t6uoDGo",
"parentPublication": {
"id": "proceedings/vl-hcc/2021/4592/0",
"title": "2021 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxX3uN0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"acronym": "Ismar-mashd",
"groupId": "1002953",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAndijr",
"doi": "10.1109/ISMAR-AMH.2014.6935434",
"title": "Integrating augmented reality to enhance expression, interaction & collaboration in live performances: A ballet dance case study",
"normalizedTitle": "Integrating augmented reality to enhance expression, interaction & collaboration in live performances: A ballet dance case study",
"abstract": "The democratization of high-end, affordable and off-the-shelf sensors and displays triggered an explosion in the exploration of interaction and projection in arts. Although mostly witnessed in interactive artistic installations (e.g. museums and exhibitions), performing arts also explore such technologies, using interaction and augmented reality as part of the performance. Such works often emerge from collaborations between artists and scientists. Despite being antonymic in appearance, we advocate that both fields can greatly benefit from this type of collaboration. Since 2006 the authors of this paper (from a research laboratory and a national ballet company) have collaborated on augmenting a ballet performance using a dancer's movements for interaction. We focus on large productions using high-end motion capture and projection systems to allow dancers to interact with virtual elements on an augmented stage in front of several hundred people. To achieve this, we introduce an `augmented reality engineer', whose role is to design the augmented reality systems and interactions according to a show's aesthetic and choreographic message, and to control them during the performance alongside light and sound technicians. Our last production: Debussy3.0 is an augmented ballet based on La Mer by Claude Debussy, featuring body interactions by one of the dancers and backstage interactions by the augmented reality engineer. For the first time, we explored 3D stereoscopy as a display technique for augmented reality and interaction in realtime on stage. The show was presented at Biarritz Casino in December 2013 in front of around 700 people. In this paper, we present the Debussy3.0 augmented ballet both as a result of the use of augmented reality in performing arts and as a guiding thread to provide feedback on arts-science collaboration. First, we will describe how the ballet was constructed aesthetically, technically and in its choreography. 
We will discuss and provide feedback on the use of motion capture and stereoscopy techniques in a live show and will then broaden the scope of discussion, providing feedback on art-science collaboration, the traps and benefits for both parties, and the positive repercussions it can bring to a laboratory when working on industrial projects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The democratization of high-end, affordable and off-the-shelf sensors and displays triggered an explosion in the exploration of interaction and projection in arts. Although mostly witnessed in interactive artistic installations (e.g. museums and exhibitions), performing arts also explore such technologies, using interaction and augmented reality as part of the performance. Such works often emerge from collaborations between artists and scientists. Despite being antonymic in appearance, we advocate that both fields can greatly benefit from this type of collaboration. Since 2006 the authors of this paper (from a research laboratory and a national ballet company) have collaborated on augmenting a ballet performance using a dancer's movements for interaction. We focus on large productions using high-end motion capture and projection systems to allow dancers to interact with virtual elements on an augmented stage in front of several hundred people. To achieve this, we introduce an `augmented reality engineer', whose role is to design the augmented reality systems and interactions according to a show's aesthetic and choreographic message, and to control them during the performance alongside light and sound technicians. Our last production: Debussy3.0 is an augmented ballet based on La Mer by Claude Debussy, featuring body interactions by one of the dancers and backstage interactions by the augmented reality engineer. For the first time, we explored 3D stereoscopy as a display technique for augmented reality and interaction in realtime on stage. The show was presented at Biarritz Casino in December 2013 in front of around 700 people. In this paper, we present the Debussy3.0 augmented ballet both as a result of the use of augmented reality in performing arts and as a guiding thread to provide feedback on arts-science collaboration. First, we will describe how the ballet was constructed aesthetically, technically and in its choreography. 
We will discuss and provide feedback on the use of motion capture and stereoscopy techniques in a live show and will then broaden the scope of discussion, providing feedback on art-science collaboration, the traps and benefits for both parties, and the positive repercussions it can bring to a laboratory when working on industrial projects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The democratization of high-end, affordable and off-the-shelf sensors and displays triggered an explosion in the exploration of interaction and projection in arts. Although mostly witnessed in interactive artistic installations (e.g. museums and exhibitions), performing arts also explore such technologies, using interaction and augmented reality as part of the performance. Such works often emerge from collaborations between artists and scientists. Despite being antonymic in appearance, we advocate that both fields can greatly benefit from this type of collaboration. Since 2006 the authors of this paper (from a research laboratory and a national ballet company) have collaborated on augmenting a ballet performance using a dancer's movements for interaction. We focus on large productions using high-end motion capture and projection systems to allow dancers to interact with virtual elements on an augmented stage in front of several hundred people. To achieve this, we introduce an `augmented reality engineer', whose role is to design the augmented reality systems and interactions according to a show's aesthetic and choreographic message, and to control them during the performance alongside light and sound technicians. Our last production: Debussy3.0 is an augmented ballet based on La Mer by Claude Debussy, featuring body interactions by one of the dancers and backstage interactions by the augmented reality engineer. For the first time, we explored 3D stereoscopy as a display technique for augmented reality and interaction in realtime on stage. The show was presented at Biarritz Casino in December 2013 in front of around 700 people. In this paper, we present the Debussy3.0 augmented ballet both as a result of the use of augmented reality in performing arts and as a guiding thread to provide feedback on arts-science collaboration. First, we will describe how the ballet was constructed aesthetically, technically and in its choreography. 
We will discuss and provide feedback on the use of motion capture and stereoscopy techniques in a live show and will then broaden the scope of discussion, providing feedback on art-science collaboration, the traps and benefits for both parties, and the positive repercussions it can bring to a laboratory when working on industrial projects.",
"fno": "06935434",
"keywords": [
"Three Dimensional Displays",
"Augmented Reality",
"Avatars",
"Art",
"Visualization",
"Collaboration",
"Augmented Reality",
"Augmented Performance",
"Movement Interaction",
"Dance"
],
"authors": [
{
"affiliation": "ESTIA, Bidart, France",
"fullName": "Alexis Clay",
"givenName": "Alexis",
"surname": "Clay",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CCN Malandain Ballet Biarritz, Biarritz, France",
"fullName": "Gael Domenger",
"givenName": "Gael",
"surname": "Domenger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ESTIA, Bidart, France",
"fullName": "Julien Conan",
"givenName": "Julien",
"surname": "Conan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CCN Malandain Ballet Biarritz, Biarritz, France",
"fullName": "Axel Domenger",
"givenName": "Axel",
"surname": "Domenger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ESTIA, Bidart, France",
"fullName": "Nadine Couture",
"givenName": "Nadine",
"surname": "Couture",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "Ismar-mashd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "21-29",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6887-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06935433",
"articleId": "12OmNC1Y5qv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06935435",
"articleId": "12OmNrJAdXk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ds-rt/2011/1643/0/06051788",
"title": "Syncretic Post-Biological Digital Identity: Hybridizing Mixed Reality Data Transfer Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2011/06051788/12OmNwHyZXG",
"parentPublication": {
"id": "proceedings/ds-rt/2011/1643/0",
"title": "2011 IEEE/ACM 15th International Symposium on Distributed Simulation and Real Time Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2012/4663/0/06483986",
"title": "Interactions and systems for augmenting a live dance performance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2012/06483986/12OmNxTEiUG",
"parentPublication": {
"id": "proceedings/ismar-amh/2012/4663/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2012/4663/0/06483988",
"title": "Co-creativity fusions in interdisciplinary augmented reality game developments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2012/06483988/12OmNxdVgUM",
"parentPublication": {
"id": "proceedings/ismar-amh/2012/4663/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a001",
"title": "Collaboration in Mediated and Augmented Reality (CiMAR) Summary",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a001/12OmNybfqVO",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2008/2357/0/04594739",
"title": "Here and there: Experiencing co-presence through mixed reality-mediated collaborative design system",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2008/04594739/12OmNzRHOVV",
"parentPublication": {
"id": "proceedings/cit/2008/2357/0",
"title": "2008 8th IEEE International Conference on Computer and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798044",
"title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a413",
"title": "Authoring and Visualization Tool for Augmented Scenic Performances Prototyping and Experience",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a413/1oZBBSo7je8",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwwMeYe",
"doi": "10.1109/ISMAR.2014.6948507",
"title": "Designing support for collaboration around physical artefacts: Using augmented reality in learning environments",
"normalizedTitle": "Designing support for collaboration around physical artefacts: Using augmented reality in learning environments",
"abstract": "The aim of this thesis is to identify mechanisms for supporting collaboration around physical artefacts in co-located and remote settings. To explore the research question in the project, a Research through Design approach has been adopted. A technology probe — an evolutionary prototype of a remote collaboration system — will be used to fuel the research. The prototype will facilitate collaboration between small groups around physical artefacts in an augmented learning environment. The prototype will inform future collaborative augmented reality technology design.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The aim of this thesis is to identify mechanisms for supporting collaboration around physical artefacts in co-located and remote settings. To explore the research question in the project, a Research through Design approach has been adopted. A technology probe — an evolutionary prototype of a remote collaboration system — will be used to fuel the research. The prototype will facilitate collaboration between small groups around physical artefacts in an augmented learning environment. The prototype will inform future collaborative augmented reality technology design.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The aim of this thesis is to identify mechanisms for supporting collaboration around physical artefacts in co-located and remote settings. To explore the research question in the project, a Research through Design approach has been adopted. A technology probe — an evolutionary prototype of a remote collaboration system — will be used to fuel the research. The prototype will facilitate collaboration between small groups around physical artefacts in an augmented learning environment. The prototype will inform future collaborative augmented reality technology design.",
"fno": "06948507",
"keywords": [
"Collaboration",
"Augmented Reality",
"Remote Learning"
],
"authors": [
{
"affiliation": "School of Information Technology and Electrical Engineering, University of Queensland",
"fullName": "Jason Weigel",
"givenName": "Jason",
"surname": "Weigel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Technology and Electrical Engineering, University of Queensland",
"fullName": "Stephen Viller",
"givenName": "Stephen",
"surname": "Viller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Teaching and Learning Innovation, University of Queensland",
"fullName": "Mark Schulz",
"givenName": "Mark",
"surname": "Schulz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "405-408",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948506",
"articleId": "12OmNyGbI59",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948508",
"articleId": "12OmNBpVQ3p",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2012/4814/0/4814a077",
"title": "Preliminary Evaluation of an Augmented Reality Collaborative Process Modelling System",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a077/12OmNvlg8kc",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948412",
"title": "Improving co-presence with augmented visual communication cues for sharing experience through video conference",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948412/12OmNwudQT3",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836453",
"title": "Challenges for Asynchronous Collaboration in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836453/12OmNxaw5c0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699225",
"title": "Augmented Reality Remote Collaboration with Dense Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699225/19F1OvIhORa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a028",
"title": "SceneCam: Improving Multi-camera Remote Collaboration using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a028/1gysmKtgeju",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a022",
"title": "Merging Live and Static 360 Panoramas Inside a 3D Scene for Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a022/1gysn0YPLm8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2021/2463/0/246300b125",
"title": "Remote Software Development: A Student-staff Collaboration to Build a Showcase Platform for Non-traditional Digital Artefacts",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2021/246300b125/1wLcggUFXVe",
"parentPublication": {
"id": "proceedings/compsac/2021/2463/0",
"title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1OvIhORa",
"doi": "10.1109/ISMAR-Adjunct.2018.00028",
"title": "Augmented Reality Remote Collaboration with Dense Reconstruction",
"normalizedTitle": "Augmented Reality Remote Collaboration with Dense Reconstruction",
"abstract": "This paper describes an Augmented Reality remote collaboration system leveraging high-fidelity, dense scene reconstruction for intuitive and precise remote guidance. A local worker in need of help can use our system to automatically generate a 3D mesh of the surrounding and stream it to a remote expert. The remote expert can navigate and explore the reconstructed environment independently of the local worker in six degrees of freedom. World-stabilized text- and image-annotations can be placed in the scene and strokes drawn on surfaces are intelligently positioned in the world. In addition, the reconstruction allows the remote expert to segment colored objects from the mesh and use the resulting 3D model to create simple animations in order to convey precise instructions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes an Augmented Reality remote collaboration system leveraging high-fidelity, dense scene reconstruction for intuitive and precise remote guidance. A local worker in need of help can use our system to automatically generate a 3D mesh of the surrounding and stream it to a remote expert. The remote expert can navigate and explore the reconstructed environment independently of the local worker in six degrees of freedom. World-stabilized text- and image-annotations can be placed in the scene and strokes drawn on surfaces are intelligently positioned in the world. In addition, the reconstruction allows the remote expert to segment colored objects from the mesh and use the resulting 3D model to create simple animations in order to convey precise instructions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes an Augmented Reality remote collaboration system leveraging high-fidelity, dense scene reconstruction for intuitive and precise remote guidance. A local worker in need of help can use our system to automatically generate a 3D mesh of the surrounding and stream it to a remote expert. The remote expert can navigate and explore the reconstructed environment independently of the local worker in six degrees of freedom. World-stabilized text- and image-annotations can be placed in the scene and strokes drawn on surfaces are intelligently positioned in the world. In addition, the reconstruction allows the remote expert to segment colored objects from the mesh and use the resulting 3D model to create simple animations in order to convey precise instructions.",
"fno": "08699225",
"keywords": [
"Augmented Reality",
"Computer Animation",
"Image Colour Analysis",
"Image Reconstruction",
"Image Segmentation",
"Mesh Generation",
"Solid Modelling",
"Remote Expert",
"Dense Scene Reconstruction",
"Augmented Reality Remote Collaboration System",
"3 D Mesh Generation",
"Colored Objects Segmentation",
"3 D Model",
"Image Reconstruction",
"Three Dimensional Displays",
"Augmented Reality",
"Collaboration",
"Glass",
"Real Time Systems",
"Tools",
"Augmented Reality",
"CSCW",
"Remote Collaboration",
"Telepresence",
"Dense Reconstruction",
"Human Centered Computing",
"Human Computer Interaction",
"Interaction Paradigms",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "DAQRI Vienna",
"fullName": "Jakob Zillner",
"givenName": "Jakob",
"surname": "Zillner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DAQRI Vienna",
"fullName": "Erick Mendez",
"givenName": "Erick",
"surname": "Mendez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DAQRI Vienna",
"fullName": "Daniel Wagner",
"givenName": "Daniel",
"surname": "Wagner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "38-39",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699300",
"articleId": "19F1SHbDTJC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699244",
"articleId": "19F1QjoSiZ2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836533",
"title": "A Remote Collaboration System with Empathy Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836533/12OmNCvumT0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550237",
"title": "Poster: 3D referencing for remote task assistance in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550237/12OmNqC2uWf",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402584",
"title": "A waist-mounted ProCam system for remote collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402584/12OmNwGZNJB",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671795",
"title": "Study of augmented gesture communication cues and view sharing in remote collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671795/12OmNwl8GBu",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671839",
"title": "Towards object based manipulation in remote guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671839/12OmNxd4tri",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2009/3557/2/3557c670",
"title": "Collabrative Education UI in Augmented Reality from Remote to Local",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2009/3557c670/12OmNy6HQUT",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798044",
"title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a091",
"title": "An MR Remote Collaborative Platform Based on 3D CAD Models for Training in Industry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a091/1gysneD006s",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.