data
dict
{ "proceeding": { "id": "12OmNxdm4Is", "title": "Eighth International Symposium on Wearable Computers", "acronym": "iswc", "groupId": "1000810", "volume": "0", "displayVolume": "1", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNz5JC4E", "doi": "10.1109/ISWC.2004.32", "title": "My Own Private Kiosk: Privacy-Preserving Public Displays", "normalizedTitle": "My Own Private Kiosk: Privacy-Preserving Public Displays", "abstract": "Ubiquitous, high-resolution, large public displays offer an attractive complement to wearable displays. Unfortunately, the inherently public nature of these public displays makes them unsuitable for displaying sensitive information. We present EyeGuide, a wearable system that allows the user to obtain information quickly from a public display without sacrificing privacy. To this end, EyeGuide employs a lightweight head-worn eye-tracker for hands-free object selection and an earphone for private communication. Our system supports public displays that are dynamic (e.g., a large plasma screen) and static (e.g., a large printed map). In our printed map scenario, EyeGuide whispers verbal directions via earphone to a user, based on where they are looking on the map. Using a technique we call \"gaze steering,\" the system guides the user's eye position to specific locations. In our dynamic public display scenarios, EyeGuide presents documents (e.g., maps) that contain sensitive data in a way that preserves privacy.", "abstracts": [ { "abstractType": "Regular", "content": "Ubiquitous, high-resolution, large public displays offer an attractive complement to wearable displays. Unfortunately, the inherently public nature of these public displays makes them unsuitable for displaying sensitive information. We present EyeGuide, a wearable system that allows the user to obtain information quickly from a public display without sacrificing privacy. 
To this end, EyeGuide employs a lightweight head-worn eye-tracker for hands-free object selection and an earphone for private communication. Our system supports public displays that are dynamic (e.g., a large plasma screen) and static (e.g., a large printed map). In our printed map scenario, EyeGuide whispers verbal directions via earphone to a user, based on where they are looking on the map. Using a technique we call \"gaze steering,\" the system guides the user's eye position to specific locations. In our dynamic public display scenarios, EyeGuide presents documents (e.g., maps) that contain sensitive data in a way that preserves privacy.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Ubiquitous, high-resolution, large public displays offer an attractive complement to wearable displays. Unfortunately, the inherently public nature of these public displays makes them unsuitable for displaying sensitive information. We present EyeGuide, a wearable system that allows the user to obtain information quickly from a public display without sacrificing privacy. To this end, EyeGuide employs a lightweight head-worn eye-tracker for hands-free object selection and an earphone for private communication. Our system supports public displays that are dynamic (e.g., a large plasma screen) and static (e.g., a large printed map). In our printed map scenario, EyeGuide whispers verbal directions via earphone to a user, based on where they are looking on the map. Using a technique we call \"gaze steering,\" the system guides the user's eye position to specific locations. 
In our dynamic public display scenarios, EyeGuide presents documents (e.g., maps) that contain sensitive data in a way that preserves privacy.", "fno": "21860132", "keywords": [ "Public Display Privacy Eye Tracking Gaze Contingent Gaze Steering Audio Augmented Reality" ], "authors": [ { "affiliation": "Columbia University", "fullName": "Marc Eaddy", "givenName": "Marc", "surname": "Eaddy", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia University", "fullName": "Gábor Blaskó", "givenName": "Gábor", "surname": "Blaskó", "__typename": "ArticleAuthorType" }, { "affiliation": "Rochester Institute of Technology", "fullName": "Jason Babcock", "givenName": "Jason", "surname": "Babcock", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia University", "fullName": "Steven Feiner", "givenName": "Steven", "surname": "Feiner", "__typename": "ArticleAuthorType" } ], "idPrefix": "iswc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-10-01T00:00:00", "pubType": "proceedings", "pages": "132-135", "year": "2004", "issn": "1530-0811", "isbn": "0-7695-2186-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "21860128", "articleId": "12OmNwErpPy", "__typename": "AdjacentArticleType" }, "next": { "fno": "21860138", "articleId": "12OmNBKEyse", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/mobiquitous/2004/2208/0/22080374", "title": "ContentCascade Incremental Content Exchange between Public Displays and Personal Devices", "doi": null, "abstractUrl": "/proceedings-article/mobiquitous/2004/22080374/12OmNrAv3KX", "parentPublication": { "id": "proceedings/mobiquitous/2004/2208/0", "title": "Mobile and Ubiquitous Systems, Annual International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/uic-atc/2013/2482/0/06726208", "title": "FCT4U -- When Private Mobile Displays Meet Public Situated Displays to Enhance the User Experience", "doi": null, "abstractUrl": "/proceedings-article/uic-atc/2013/06726208/12OmNxZTtGI", "parentPublication": { "id": "proceedings/uic-atc/2013/2482/0", "title": "2013 IEEE 10th International Conference on Ubiquitous Intelligence & Computing and 2013 IEEE 10th International Conference on Autonomic & Trusted Computing (UIC/ATC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ie/2010/4149/0/4149a040", "title": "A Framework for Auditory Displays in Intelligent Environments", "doi": null, "abstractUrl": "/proceedings-article/ie/2010/4149a040/12OmNzICEOa", "parentPublication": { "id": "proceedings/ie/2010/4149/0", "title": "Intelligent Environments, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2005/2299/0/22990139", "title": "Using Symbiotic Displays to View Sensitive Information in Public", "doi": null, "abstractUrl": "/proceedings-article/percom/2005/22990139/12OmNzVGcOm", "parentPublication": { "id": "proceedings/percom/2005/2299/0", "title": "Third IEEE International Conference on Pervasive Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2008/3113/0/3113a509", "title": "MAGIC Broker: A Middleware Toolkit for Interactive Public Displays", "doi": null, "abstractUrl": "/proceedings-article/percom/2008/3113a509/12OmNzlUKgQ", "parentPublication": { "id": "proceedings/percom/2008/3113/0", "title": "2008 Sixth Annual IEEE International Conference on Pervasive Computing and Communications (PerCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nfc/2011/4327/0/4327a015", "title": "Touch to Play -- Exploring Touch-Based Mobile Interaction 
with Public Displays", "doi": null, "abstractUrl": "/proceedings-article/nfc/2011/4327a015/12OmNzn38XH", "parentPublication": { "id": "proceedings/nfc/2011/4327/0", "title": "Near Field Communication, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/02/mcg2013020028", "title": "Making public displays interactive everywhere", "doi": null, "abstractUrl": "/magazine/cg/2013/02/mcg2013020028/13rRUwhpBIq", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2016/03/mpc2016030024", "title": "Toward Meaningful Engagement with Pervasive Displays", "doi": null, "abstractUrl": "/magazine/pc/2016/03/mpc2016030024/13rRUy2YLQi", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2013/01/mpc2013010008", "title": "Public Displays Invade Urban Spaces", "doi": null, "abstractUrl": "/magazine/pc/2013/01/mpc2013010008/13rRUy2YLVu", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percomw/2018/3227/0/08480284", "title": "Billing Models for Public Displays in Smart Cities", "doi": null, "abstractUrl": "/proceedings-article/percomw/2018/08480284/17D45VTRotw", "parentPublication": { "id": "proceedings/percomw/2018/3227/0", "title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJbEwHHqEg", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJbTrAdAju", "doi": "10.1109/VR51125.2022.00059", "title": "Real-Time Gaze Tracking with Event-Driven Eye Segmentation", "normalizedTitle": "Real-Time Gaze Tracking with Event-Driven Eye Segmentation", "abstract": "Gaze tracking is increasingly becoming an essential component in Augmented and Virtual Reality. Modern gaze tracking algorithms are heavyweight; they operate at most 5 Hz on mobile processors despite that near-eye cameras comfortably operate at a real-time rate (> 30 Hz). This paper presents a real-time eye tracking algorithm that, on average, operates at 30 Hz on a mobile processor, achieves 0.1°–0.5° gaze accuracies, all the while requiring only 30K parameters, one to two orders of magnitude smaller than state-of-the-art eye tracking algorithms. The crux of our algorithm is an Auto ROI mode, which continuously predicts the Regions of Interest (ROIs) of near-eye images and judiciously processes only the ROIs for gaze estimation. To that end, we introduce a novel, lightweight ROI prediction algorithm by emulating an event camera. We discuss how a software emulation of events enables accurate ROI prediction without requiring special hardware. The code of our paper is available at https://github.com/horizon-research/edgaze.", "abstracts": [ { "abstractType": "Regular", "content": "Gaze tracking is increasingly becoming an essential component in Augmented and Virtual Reality. Modern gaze tracking algorithms are heavyweight; they operate at most 5 Hz on mobile processors despite that near-eye cameras comfortably operate at a real-time rate (> 30 Hz). 
This paper presents a real-time eye tracking algorithm that, on average, operates at 30 Hz on a mobile processor, achieves 0.1°–0.5° gaze accuracies, all the while requiring only 30K parameters, one to two orders of magnitude smaller than state-of-the-art eye tracking algorithms. The crux of our algorithm is an Auto ROI mode, which continuously predicts the Regions of Interest (ROIs) of near-eye images and judiciously processes only the ROIs for gaze estimation. To that end, we introduce a novel, lightweight ROI prediction algorithm by emulating an event camera. We discuss how a software emulation of events enables accurate ROI prediction without requiring special hardware. The code of our paper is available at https://github.com/horizon-research/edgaze.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Gaze tracking is increasingly becoming an essential component in Augmented and Virtual Reality. Modern gaze tracking algorithms are heavyweight; they operate at most 5 Hz on mobile processors despite that near-eye cameras comfortably operate at a real-time rate (> 30 Hz). This paper presents a real-time eye tracking algorithm that, on average, operates at 30 Hz on a mobile processor, achieves 0.1°–0.5° gaze accuracies, all the while requiring only 30K parameters, one to two orders of magnitude smaller than state-of-the-art eye tracking algorithms. The crux of our algorithm is an Auto ROI mode, which continuously predicts the Regions of Interest (ROIs) of near-eye images and judiciously processes only the ROIs for gaze estimation. To that end, we introduce a novel, lightweight ROI prediction algorithm by emulating an event camera. We discuss how a software emulation of events enables accurate ROI prediction without requiring special hardware. 
The code of our paper is available at https://github.com/horizon-research/edgaze.", "fno": "961700a399", "keywords": [ "Augmented Reality", "Eye", "Gaze Tracking", "Image Segmentation", "Mobile Computing", "Multiprocessing Systems", "Regions Of Interest Prediction", "Augmented Reality", "Real Time Eye Tracking", "Near Eye Cameras", "Mobile Processor", "Virtual Reality", "Event Driven Eye Segmentation", "Real Time Gaze Tracking", "ROI Prediction", "Event Camera", "Gaze Estimation", "Near Eye Images", "Solid Modeling", "Three Dimensional Displays", "Software Algorithms", "Gaze Tracking", "Virtual Reality", "User Interfaces", "Prediction Algorithms", "Gaze", "Eye Tracking", "Event Camera", "Segmentation" ], "authors": [ { "affiliation": "University of Rochester", "fullName": "Yu Feng", "givenName": "Yu", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rochester", "fullName": "Nathan Goulding-Hotta", "givenName": "Nathan", "surname": "Goulding-Hotta", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rochester", "fullName": "Asif Khan", "givenName": "Asif", "surname": "Khan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rochester", "fullName": "Hans Reyserhove", "givenName": "Hans", "surname": "Reyserhove", "__typename": "ArticleAuthorType" }, { "affiliation": "Reality Labs Research", "fullName": "Yuhao Zhu", "givenName": "Yuhao", "surname": "Zhu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "399-408", "year": "2022", "issn": null, "isbn": "978-1-6654-9617-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1CJbSY8LeOA", "name": "pvr202296170-09756796s1-mm_961700a399.zip", "size": "203 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756796s1-mm_961700a399.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "961700a389", "articleId": "1CJbVF427gQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "961700a409", "articleId": "1CJceLbqObe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icinis/2010/4249/0/4249a048", "title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642", "doi": null, "abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4", "parentPublication": { "id": "proceedings/icinis/2010/4249/0", "title": "Intelligent Networks and Intelligent Systems, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b003", "title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2011/1451/0/06000327", "title": "Gaze tracking as a game input interface", "doi": null, "abstractUrl": "/proceedings-article/cgames/2011/06000327/12OmNxRWIeo", "parentPublication": { "id": "proceedings/cgames/2011/1451/0", "title": "2011 16th International Conference on Computer Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wkdd/2009/3543/0/3543a594", "title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application", "doi": null, "abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn", "parentPublication": { "id": "proceedings/wkdd/2009/3543/0", "title": "2009 Second 
International Workshop on Knowledge Discovery and Data Mining. WKDD 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2018/6857/0/08643332", "title": "Open framework for error-compensated gaze data collection with eye tracking glasses", "doi": null, "abstractUrl": "/proceedings-article/ism/2018/08643332/17QjJdei3Y0", "parentPublication": { "id": "proceedings/ism/2018/6857/0", "title": "2018 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a594", "title": "High-speed Gaze-oriented Projection by Cross-ratio-based Eye Tracking with Dual Infrared Imaging", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a594/1CJewqWywOk", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956312", "title": "A Joint Cascaded Framework for Simultaneous Eye State, Eye Center, and Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956312/1IHq8em8jug", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a082", "title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2021/05/09389490", "title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz", "doi": null, "abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a554", "title": "Estimating Gaze From Head and Hand Pose and Scene Images for Open-Ended Exploration in VR Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a554/1tnY5akLwvS", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1GU6OXhOl4k", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "acronym": "iiai-aai", "groupId": "1801921", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1GU75yVJubS", "doi": "10.1109/IIAIAAI55812.2022.00042", "title": "Development and evaluation of car training system using VR and eye tracking technology", "normalizedTitle": "Development and evaluation of car training system using VR and eye tracking technology", "abstract": "When practicing car driving in a real world, there are risks like traffic accidents. With the development of virtual reality technology, we can create environments in which people can practice without being exposed to certain dangers like traffic accidents that potentially occur in real environments. So far, many researches regarding car driving in VR have been developed, but little attention has been paid to analyzing and visualizing eye gaze data collected in VR education systems. Analyzing and visualizing the collected eye gaze data is potential to grasp information of whether learners overlooked objects such as traffic signals and signs. In this study, we developed a driving simulator in VR and collected eye tracking data from five university students while they were practicing driving using eye tracking technology. Also, we have developed a system that allows learners to reflect on their own driving using the collected their eye gaze data. We compared the changes in gaze between those who received feedback to improve their driving skills and those who did not. The results showed that the feedback was effective because it reduced the number of oversights while driving.", "abstracts": [ { "abstractType": "Regular", "content": "When practicing car driving in a real world, there are risks like traffic accidents. 
With the development of virtual reality technology, we can create environments in which people can practice without being exposed to certain dangers like traffic accidents that potentially occur in real environments. So far, many researches regarding car driving in VR have been developed, but little attention has been paid to analyzing and visualizing eye gaze data collected in VR education systems. Analyzing and visualizing the collected eye gaze data is potential to grasp information of whether learners overlooked objects such as traffic signals and signs. In this study, we developed a driving simulator in VR and collected eye tracking data from five university students while they were practicing driving using eye tracking technology. Also, we have developed a system that allows learners to reflect on their own driving using the collected their eye gaze data. We compared the changes in gaze between those who received feedback to improve their driving skills and those who did not. The results showed that the feedback was effective because it reduced the number of oversights while driving.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "When practicing car driving in a real world, there are risks like traffic accidents. With the development of virtual reality technology, we can create environments in which people can practice without being exposed to certain dangers like traffic accidents that potentially occur in real environments. So far, many researches regarding car driving in VR have been developed, but little attention has been paid to analyzing and visualizing eye gaze data collected in VR education systems. Analyzing and visualizing the collected eye gaze data is potential to grasp information of whether learners overlooked objects such as traffic signals and signs. 
In this study, we developed a driving simulator in VR and collected eye tracking data from five university students while they were practicing driving using eye tracking technology. Also, we have developed a system that allows learners to reflect on their own driving using the collected their eye gaze data. We compared the changes in gaze between those who received feedback to improve their driving skills and those who did not. The results showed that the feedback was effective because it reduced the number of oversights while driving.", "fno": "975500a170", "keywords": [ "Computer Based Training", "Data Analysis", "Data Visualisation", "Gaze Tracking", "Traffic Engineering Computing", "Virtual Reality", "Eye Gaze Data Visualization", "Eye Gaze Data Analysis", "Car Driving Practice", "Real Environments", "Virtual Reality Technology", "Car Training System", "Driving Skills", "Eye Tracking Technology", "Eye Tracking Data", "Driving Simulator", "Traffic Signals", "VR Education Systems", "Traffic Accidents", "Training", "Data Visualization", "Wheels", "Gaze Tracking", "Virtual Reality", "Planning", "Automobiles", "VR", "Virtual Reality", "Vehicle Simulator", "Eye Gaze", "Training" ], "authors": [ { "affiliation": "Hiroshima City University,Graduate School of Information Sciences and Technology,Department of Intelligent Engineering,Hiroshima,Japan", "fullName": "Tetsuhiro Ito", "givenName": "Tetsuhiro", "surname": "Ito", "__typename": "ArticleAuthorType" }, { "affiliation": "Hiroshima City University,Graduate School of Information Sciences and Technology,Department of Intelligent Engineering,Hiroshima,Japan", "fullName": "Kousuke Mouri", "givenName": "Kousuke", "surname": "Mouri", "__typename": "ArticleAuthorType" }, { "affiliation": "Hiroshima City University,Graduate School of Information Sciences and Technology,Department of Intelligent Engineering,Hiroshima,Japan", "fullName": "Masaru Okamoto", "givenName": "Masaru", "surname": "Okamoto", "__typename": 
"ArticleAuthorType" }, { "affiliation": "Hiroshima City University,Graduate School of Information Sciences and Technology,Department of Intelligent Engineering,Hiroshima,Japan", "fullName": "Yukihiro Matsubara", "givenName": "Yukihiro", "surname": "Matsubara", "__typename": "ArticleAuthorType" } ], "idPrefix": "iiai-aai", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "170-173", "year": "2022", "issn": "2472-0070", "isbn": "978-1-6654-9755-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "975500a164", "articleId": "1GU74zCEZnW", "__typename": "AdjacentArticleType" }, "next": { "fno": "975500a174", "articleId": "1GU6ZMlOCCQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wkdd/2009/3543/0/3543a594", "title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application", "doi": null, "abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn", "parentPublication": { "id": "proceedings/wkdd/2009/3543/0", "title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. 
WKDD 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a787", "title": "VRDoc: Gaze-based Interactions for VR Reading Experience", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797758", "title": "Person Independent, Privacy Preserving, and Real Time Assessment of Cognitive Load using Eye Tracking in a Virtual Reality Setup", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797758/1cJ10RDnKzS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797896", "title": "Pedagogical Agent Responsive to Eye Tracking in Educational VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797896/1cJ1ceQVCtG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a422", "title": "Behavior Analysis of Indoor Escape Route-Finding Based on Head-Mounted VR and Eye Tracking", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a422/1ehBGoaPHhK", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0", "title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing 
(CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089578", "title": "Exploring Eye Gaze Visualization Techniques for Identifying Distracted Students in Educational VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089578/1jIxfimnIaY", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090559", "title": "A Methodology of Eye Gazing Attention Determination for VR Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090559/1jIxoACmybu", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090461", "title": "Front Camera Eye Tracking For Mobile VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a707", "title": "[DC] Eye Fixation Forecasting in Task-Oriented Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a707/1tnWQmeJsZi", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvris/2020/9636/0/963600a047", "title": "Eye-Tracking Aided VR 
System for Amblyopic Pediatric Treatment Difficulty Adjustment", "doi": null, "abstractUrl": "/proceedings-article/icvris/2020/963600a047/1x4ZnNSLOtG", "parentPublication": { "id": "proceedings/icvris/2020/9636/0", "title": "2020 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ10RDnKzS", "doi": "10.1109/VR.2019.8797758", "title": "Person Independent, Privacy Preserving, and Real Time Assessment of Cognitive Load using Eye Tracking in a Virtual Reality Setup", "normalizedTitle": "Person Independent, Privacy Preserving, and Real Time Assessment of Cognitive Load using Eye Tracking in a Virtual Reality Setup", "abstract": "Eye tracking is handled as key enabling technology to VR and AR for multiple reasons, since it not only can help to massively reduce computational costs through gaze-based optimization of graphics and rendering, but also offers a unique opportunity to design gaze-based personalized interfaces and applications. Additionally, the analysis of eye tracking data allows to assess the cognitive load, intentions and actions of the user. In this work, we propose a person-independent, privacy-preserving and gaze-based cognitive load recognition scheme for drivers under critical situations based on previously collected driving data from a driving experiment in VR including a safety critical situation. Based on carefully annotated ground-truth information, we used pupillary information and performance measures (inputs on accelerator, brake, and steering wheel) to train multiple classifiers with the aim of assessing the cognitive load of the driver. Our results show that incorporating eye tracking data into the VR setup allows to predict the cognitive load of the user at a high accuracy above 80%. 
Beyond the specific setup, the proposed framework can be used in any adaptive and intelligent VR/AR application.", "abstracts": [ { "abstractType": "Regular", "content": "Eye tracking is handled as key enabling technology to VR and AR for multiple reasons, since it not only can help to massively reduce computational costs through gaze-based optimization of graphics and rendering, but also offers a unique opportunity to design gaze-based personalized interfaces and applications. Additionally, the analysis of eye tracking data allows to assess the cognitive load, intentions and actions of the user. In this work, we propose a person-independent, privacy-preserving and gaze-based cognitive load recognition scheme for drivers under critical situations based on previously collected driving data from a driving experiment in VR including a safety critical situation. Based on carefully annotated ground-truth information, we used pupillary information and performance measures (inputs on accelerator, brake, and steering wheel) to train multiple classifiers with the aim of assessing the cognitive load of the driver. Our results show that incorporating eye tracking data into the VR setup allows to predict the cognitive load of the user at a high accuracy above 80%. Beyond the specific setup, the proposed framework can be used in any adaptive and intelligent VR/AR application.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Eye tracking is handled as key enabling technology to VR and AR for multiple reasons, since it not only can help to massively reduce computational costs through gaze-based optimization of graphics and rendering, but also offers a unique opportunity to design gaze-based personalized interfaces and applications. Additionally, the analysis of eye tracking data allows to assess the cognitive load, intentions and actions of the user. 
In this work, we propose a person-independent, privacy-preserving and gaze-based cognitive load recognition scheme for drivers under critical situations based on previously collected driving data from a driving experiment in VR including a safety critical situation. Based on carefully annotated ground-truth information, we used pupillary information and performance measures (inputs on accelerator, brake, and steering wheel) to train multiple classifiers with the aim of assessing the cognitive load of the driver. Our results show that incorporating eye tracking data into the VR setup allows to predict the cognitive load of the user at a high accuracy above 80%. Beyond the specific setup, the proposed framework can be used in any adaptive and intelligent VR/AR application.", "fno": "08797758", "keywords": [ "Cognition", "Data Privacy", "Human Computer Interaction", "Learning Artificial Intelligence", "Optimisation", "Pattern Classification", "User Interfaces", "Virtual Reality", "Privacy Preserving", "Virtual Reality Setup", "Gaze Based Optimization", "Gaze Based Personalized Interfaces", "Eye Tracking Data", "Person Independent", "Cognitive Load Recognition Scheme", "VR Setup", "Ground Truth Information", "Driving Data Collection", "Vehicles", "Gaze Tracking", "Safety", "Real Time Systems", "Roads", "Graphics", "Task Analysis", "Eye Tracking", "Cognitive Load Recognition", "Virtual Reality", "Driving Simulation", "Computing Methodologies X 2014 Computer Graphics X 2014 Graphics Systems And Interfaces X 2014 Virtual Reality Perception", "Computing Methodologies X 2014 Machine Learning X 2014 Machine Learning Approaches X 2014 Classification And Regression Trees Kernel Methods", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Empirical Studies In HCI" ], "authors": [ { "affiliation": "Perception Engineering, University of Tübingen", "fullName": "Efe Bozkir", "givenName": "Efe", "surname": "Bozkir", "__typename": "ArticleAuthorType" }, { 
"affiliation": "Perception Engineering, University of Tübingen", "fullName": "David Geisler", "givenName": "David", "surname": "Geisler", "__typename": "ArticleAuthorType" }, { "affiliation": "Perception Engineering, University of Tübingen", "fullName": "Enkelejda Kasneci", "givenName": "Enkelejda", "surname": "Kasneci", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1834-1837", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798125", "articleId": "1cJ141Kdrfq", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798273", "articleId": "1cJ0T4CUJTq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2015/9953/0/07344621", "title": "Cognitive state measurement from eye gaze analysis in an intelligent virtual reality driving system for autism intervention", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344621/12OmNyfdOR4", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2022/9755/0/975500a170", "title": "Development and evaluation of car training system using VR and eye tracking technology", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2022/975500a170/1GU75yVJubS", "parentPublication": { "id": "proceedings/iiai-aai/2022/9755/0", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wi/2019/6934/0/08909475", "title": "Eye Tracking based Cognitive-Centered User Models", "doi": null, "abstractUrl": "/proceedings-article/wi/2019/08909475/1febq6zQbFm", "parentPublication": { "id": "proceedings/wi/2019/6934/0", "title": "2019 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csii/2019/2553/0/255300a013", "title": "Detection of Driver's Eye Fixation on a Moving Target by Using Line Fitting", "doi": null, "abstractUrl": "/proceedings-article/csii/2019/255300a013/1fw1r2L7Gne", "parentPublication": { "id": "proceedings/csii/2019/2553/0", "title": "2019 6th International Conference on Computational Science/Intelligence and Applied Informatics (CSII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089578", "title": "Exploring Eye Gaze Visualization Techniques for Identifying Distracted Students in Educational VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089578/1jIxfimnIaY", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090559", "title": "A Methodology of Eye Gazing Attention Determination for VR Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090559/1jIxoACmybu", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090461", "title": "Front Camera Eye Tracking For Mobile VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU", "parentPublication": { "id": 
"proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iri/2020/1054/0/09191571", "title": "Automated Filtering of Eye Gaze Metrics from Dynamic Areas of Interest", "doi": null, "abstractUrl": "/proceedings-article/iri/2020/09191571/1n0IyGDlxPq", "parentPublication": { "id": "proceedings/iri/2020/1054/0", "title": "2020 IEEE 21st International Conference on Information Reuse and Integration for Data Science (IRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ewdts/2020/9899/0/09225144", "title": "Exploiting EEG Signals for Eye Motion Tracking", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2020/09225144/1nWNWOWhzj2", "parentPublication": { "id": "proceedings/ewdts/2020/9899/0", "title": "2020 IEEE East-West Design & Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a707", "title": "[DC] Eye Fixation Forecasting in Task-Oriented Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a707/1tnWQmeJsZi", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1oZBzHKi4UM", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "acronym": "svr", "groupId": "1800426", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1oZBBw6BBa8", "doi": "10.1109/SVR51698.2020.00066", "title": "Rendering Optimizations for Virtual Reality Using Eye-Tracking", "normalizedTitle": "Rendering Optimizations for Virtual Reality Using Eye-Tracking", "abstract": "Optimizing rendering in virtual reality is an open problem in computer science. The nature of modern VR display technology (high refresh rate and increasing pixel density), coupled with the relatively slow growth modern compute capability, is leading to a bottleneck in VR performance. As we further research methodologies for improving rendering performance and accuracy for VR, it is important to understand the historical approaches and where they succeeded or failed in their approaches. Some implementations will double computing because of the need of stereoscopy, and thus have higher overhead for rendering. This can be improved with Multi-View Rendering where the GPU hardware can assist in duplicating rasterization for multiple views with differing projections. More recently, perception-based rendering has gained traction, which can be further accelerated using Variable Shading Rate or Multi-Rate Shading technology found on more recent GPUs. There has also been some success in using deep neural networks to assist with transmitting foveated content over a network. The advances in the field leave many open research questions, including sparse pixel rendering, driving user attention, and techniques and methodologies for combining variable shading rate images. This review focuses research associated with rendering optimizations for virtual reality using eye tracking, since it is becoming a feature present in consumer-level head-mounted displays. 
From our review, affordable off-the-shelf virtual reality and eye tracking are both leading to freeing up rendering resources towards improved performance and visual fidelity, as well as providing new and exciting opportunities for human-computer interaction.", "abstracts": [ { "abstractType": "Regular", "content": "Optimizing rendering in virtual reality is an open problem in computer science. The nature of modern VR display technology (high refresh rate and increasing pixel density), coupled with the relatively slow growth modern compute capability, is leading to a bottleneck in VR performance. As we further research methodologies for improving rendering performance and accuracy for VR, it is important to understand the historical approaches and where they succeeded or failed in their approaches. Some implementations will double computing because of the need of stereoscopy, and thus have higher overhead for rendering. This can be improved with Multi-View Rendering where the GPU hardware can assist in duplicating rasterization for multiple views with differing projections. More recently, perception-based rendering has gained traction, which can be further accelerated using Variable Shading Rate or Multi-Rate Shading technology found on more recent GPUs. There has also been some success in using deep neural networks to assist with transmitting foveated content over a network. The advances in the field leave many open research questions, including sparse pixel rendering, driving user attention, and techniques and methodologies for combining variable shading rate images. This review focuses research associated with rendering optimizations for virtual reality using eye tracking, since it is becoming a feature present in consumer-level head-mounted displays. 
From our review, affordable off-the-shelf virtual reality and eye tracking are both leading to freeing up rendering resources towards improved performance and visual fidelity, as well as providing new and exciting opportunities for human-computer interaction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Optimizing rendering in virtual reality is an open problem in computer science. The nature of modern VR display technology (high refresh rate and increasing pixel density), coupled with the relatively slow growth modern compute capability, is leading to a bottleneck in VR performance. As we further research methodologies for improving rendering performance and accuracy for VR, it is important to understand the historical approaches and where they succeeded or failed in their approaches. Some implementations will double computing because of the need of stereoscopy, and thus have higher overhead for rendering. This can be improved with Multi-View Rendering where the GPU hardware can assist in duplicating rasterization for multiple views with differing projections. More recently, perception-based rendering has gained traction, which can be further accelerated using Variable Shading Rate or Multi-Rate Shading technology found on more recent GPUs. There has also been some success in using deep neural networks to assist with transmitting foveated content over a network. The advances in the field leave many open research questions, including sparse pixel rendering, driving user attention, and techniques and methodologies for combining variable shading rate images. This review focuses research associated with rendering optimizations for virtual reality using eye tracking, since it is becoming a feature present in consumer-level head-mounted displays. 
From our review, affordable off-the-shelf virtual reality and eye tracking are both leading to freeing up rendering resources towards improved performance and visual fidelity, as well as providing new and exciting opportunities for human-computer interaction.", "fno": "923100a398", "keywords": [ "Gaze Tracking", "Graphics Processing Units", "Helmet Mounted Displays", "Human Computer Interaction", "Image Resolution", "Rendering Computer Graphics", "Virtual Reality", "High Refresh Rate", "Pixel Density", "VR Performance", "Research Methodologies", "Rendering Performance", "Historical Approaches", "Multiview Rendering", "Perception Based Rendering", "Deep Neural Networks", "Open Research Questions", "Sparse Pixel Rendering", "Variable Shading Rate Images", "Rendering Optimizations", "Eye Tracking", "Consumer Level Head Mounted Displays", "Off The Shelf Virtual Reality", "Rendering Resources", "Human Computer Interaction", "Eye Tracking", "Open Problem", "Computer Science", "GPU Hardware", "Visual Fidelity", "Slow Growth Modern Compute Capability", "Multirate Shading Technology", "Modern VR Display Technology", "Rendering Computer Graphics", "Gaze Tracking", "Hardware", "Virtual Reality", "Visualization", "Tracking", "Headphones", "Virtual Reality", "Perception", "Foveated Rendering", "Multi Rate Shading", "Multi View Rendering" ], "authors": [ { "affiliation": "Ontario Tech University,Faculty of Science,Oshawa,Ontario,L1G-0C5", "fullName": "Sage L. 
Matthews", "givenName": "Sage L.", "surname": "Matthews", "__typename": "ArticleAuthorType" }, { "affiliation": "Ontario Tech University,Faculty of Science,Oshawa,Ontario,L1G-0C5", "fullName": "Alvaro Uribe-Quevedo", "givenName": "Alvaro", "surname": "Uribe-Quevedo", "__typename": "ArticleAuthorType" }, { "affiliation": "Neurofit VR Inc.,Toronto,Ontario", "fullName": "Alexander Theodorou", "givenName": "Alexander", "surname": "Theodorou", "__typename": "ArticleAuthorType" } ], "idPrefix": "svr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "398-405", "year": "2020", "issn": null, "isbn": "978-1-7281-9231-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "923100a391", "articleId": "1oZBBWaRzNu", "__typename": "AdjacentArticleType" }, "next": { "fno": "923100a406", "articleId": "1oZBC3Rau3K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2016/0836/0/07504757", "title": "Combining eye tracking with optimizations for lens astigmatism in modern wide-angle HMDs", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504757/12OmNySG3Vp", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446247", "title": "Concept for Rendering Optimizations for Full Human Field of View HMDs", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446247/13bd1eY1x3i", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699283", 
"title": "Using Eye Tracking to Improve Information Retrieval in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699283/19F1V9Ax9Be", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500d937", "title": "Event-Based Kilohertz Eye Tracking using Coded Differential Lighting", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500d937/1B13uiL4IUM", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a471", "title": "Locomotion-aware Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a397", "title": "Vegetation Rendering Optimization for Virtual Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a397/1ap5wyffDYA", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090461", "title": "Front Camera Eye Tracking For Mobile VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 
3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbdie/2020/5900/0/09150219", "title": "Application of Virtual Reality Combined with Eye Tracking Technology for Design Flows", "doi": null, "abstractUrl": "/proceedings-article/icbdie/2020/09150219/1lPGNkVywSI", "parentPublication": { "id": "proceedings/icbdie/2020/5900/0", "title": "2020 International Conference on Big Data and Informatization Education (ICBDIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382914", "title": "A privacy-preserving approach to streaming eye-tracking data", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382914/1saZw54tjDa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a316", "title": "Cloud Rendering Scheme for Standalone Virtual Reality Headset", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a316/1vg8ftWdDoY", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy2agSz", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBLdKJ7", "doi": "10.1109/CVPRW.2015.7301373", "title": "Fresnel lens imaging with post-capture image processing", "normalizedTitle": "Fresnel lens imaging with post-capture image processing", "abstract": "This paper describes a unified approach to correct optical distortions in images formed by a Fresnel lens with computational post-processing that opens up new opportunities to use Fresnel lenses in lightweight and inexpensive computer vision devices. Traditional methods of aberration correction do not address artifacts introduced by a Fresnel lens in a systematic way and thus fail to deliver image quality acceptable for general-purpose color imaging. In our approach, the image is restored using three steps: first, by deblurring the base color channel, then by sharpening other two channels, and finally by applying color correction. Deblurring and sharpening remove significant chromatic aberration and are similar to the restoration technique used for images formed by simple refraction lenses. Color correction stage removes strong color shift caused by energy redistribution between diffraction orders of Fresnel lens. This post-capture processing was tested on real images formed by a four-step approximation of the Fresnel lens manufactured in our optics laboratory.", "abstracts": [ { "abstractType": "Regular", "content": "This paper describes a unified approach to correct optical distortions in images formed by a Fresnel lens with computational post-processing that opens up new opportunities to use Fresnel lenses in lightweight and inexpensive computer vision devices. 
Traditional methods of aberration correction do not address artifacts introduced by a Fresnel lens in a systematic way and thus fail to deliver image quality acceptable for general-purpose color imaging. In our approach, the image is restored using three steps: first, by deblurring the base color channel, then by sharpening other two channels, and finally by applying color correction. Deblurring and sharpening remove significant chromatic aberration and are similar to the restoration technique used for images formed by simple refraction lenses. Color correction stage removes strong color shift caused by energy redistribution between diffraction orders of Fresnel lens. This post-capture processing was tested on real images formed by a four-step approximation of the Fresnel lens manufactured in our optics laboratory.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper describes a unified approach to correct optical distortions in images formed by a Fresnel lens with computational post-processing that opens up new opportunities to use Fresnel lenses in lightweight and inexpensive computer vision devices. Traditional methods of aberration correction do not address artifacts introduced by a Fresnel lens in a systematic way and thus fail to deliver image quality acceptable for general-purpose color imaging. In our approach, the image is restored using three steps: first, by deblurring the base color channel, then by sharpening other two channels, and finally by applying color correction. Deblurring and sharpening remove significant chromatic aberration and are similar to the restoration technique used for images formed by simple refraction lenses. Color correction stage removes strong color shift caused by energy redistribution between diffraction orders of Fresnel lens. 
This post-capture processing was tested on real images formed by a four-step approximation of the Fresnel lens manufactured in our optics laboratory.", "fno": "07301373", "keywords": [ "Lenses", "Image Color Analysis", "Distortion", "Optical Distortion", "Deconvolution", "Diffraction", "Fresnel Reflection" ], "authors": [ { "affiliation": "Samara State Aerospace University, 34, Moskovskoye Shosse, Russia, 443086", "fullName": "Artem Nikonorov", "givenName": "Artem", "surname": "Nikonorov", "__typename": "ArticleAuthorType" }, { "affiliation": "Samara State Aerospace University, 34, Moskovskoye Shosse, Russia, 443086", "fullName": "Roman Skidanov", "givenName": "Roman", "surname": "Skidanov", "__typename": "ArticleAuthorType" }, { "affiliation": "Samara State Aerospace University, 34, Moskovskoye Shosse, Russia, 443086", "fullName": "Vladimir Fursov", "givenName": "Vladimir", "surname": "Fursov", "__typename": "ArticleAuthorType" }, { "affiliation": "Image Processing Systems Institute of RAS, 151, Molodogvardeyskaya str., Samara, Russia, 443001", "fullName": "Maksim Petrov", "givenName": "Maksim", "surname": "Petrov", "__typename": "ArticleAuthorType" }, { "affiliation": "Image Processing Systems Institute of RAS, 151, Molodogvardeyskaya str., Samara, Russia, 443001", "fullName": "Sergey Bibikov", "givenName": "Sergey", "surname": "Bibikov", "__typename": "ArticleAuthorType" }, { "affiliation": "Image Processing Systems Institute of RAS, 151, Molodogvardeyskaya str., Samara, Russia, 443001", "fullName": "Yuriy Yuzifovich", "givenName": "Yuriy", "surname": "Yuzifovich", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-06-01T00:00:00", "pubType": "proceedings", "pages": "33-41", "year": "2015", "issn": "2160-7516", "isbn": "978-1-4673-6759-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { 
"previous": { "fno": "07301372", "articleId": "12OmNro0HZL", "__typename": "AdjacentArticleType" }, "next": { "fno": "07301374", "articleId": "12OmNxecS6g", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a761", "title": "Holographic Projection Using Converging Spherical Wave Illumination", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a761/12OmNASraPv", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567314", "title": "Fresnel diffraction from polygonal apertures", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567314/12OmNAolGVA", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2013/6463/0/06528316", "title": "What does an aberrated photo tell us about the lens and the scene?", "doi": null, "abstractUrl": "/proceedings-article/iccp/2013/06528316/12OmNCesrcF", "parentPublication": { "id": "proceedings/iccp/2013/6463/0", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1991/2163/0/00131931", "title": "Active lens control for high precision computer imaging", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131931/12OmNqFa5pm", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 
1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/2/4077c482", "title": "Imaging Research of Fresnel Holography", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077c482/12OmNrJiCXa", "parentPublication": { "id": "proceedings/icicta/2010/4077/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/4353/2/05750979", "title": "Research on Resolution of the Volume Holography Imaging Systems", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750979/12OmNwFicVh", "parentPublication": { "id": "icicta/2011/4353/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/date/2006/1/1/01657016", "title": "Lens Aberration Aware Timing-Driven Placement", "doi": null, "abstractUrl": "/proceedings-article/date/2006/01657016/12OmNxZ2Glb", "parentPublication": { "id": "proceedings/date/2006/1/1", "title": "2006 Design, Automation and Test in Europe", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d268", "title": "Revisiting Cross-Channel Information Transfer for Chromatic Aberration Correction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d268/12OmNywxlHl", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2010/8833/0/05759696", "title": "A system and method for auto-correction of first order lens distortion", "doi": null, "abstractUrl": 
"/proceedings-article/aipr/2010/05759696/12OmNzw8jc1", "parentPublication": { "id": "proceedings/aipr/2010/8833/0", "title": "2010 IEEE 39th Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09384477", "title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array", "doi": null, "abstractUrl": "/journal/tg/2021/05/09384477/1scDuWhBPY4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNASrawz", "title": "2009 IEEE Virtual Reality Conference", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNBd9T2L", "doi": "10.1109/VR.2009.4811043", "title": "A Concept for Applying VR and AR Technologies to Support Efficient 3D Non-contact Model Digitalization", "normalizedTitle": "A Concept for Applying VR and AR Technologies to Support Efficient 3D Non-contact Model Digitalization", "abstract": "In this paper a new interactive digitization concept for large real world objects is described using the virtual reality (VR) environment Elbe Dom. The method combines augmented reality (AR) technologies with a high quality display of textured three dimensional (3D) models. One focus is thereby the display of valuable information of the actual measuring process. The proposed method is a non-contact technique, especially applicable to objects with freeform and a size up to 5 m times 5 m times 3 m. The method achieves an accuracy value of less than 1 mm within the whole measuring volume.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper a new interactive digitization concept for large real world objects is described using the virtual reality (VR) environment Elbe Dom. The method combines augmented reality (AR) technologies with a high quality display of textured three dimensional (3D) models. One focus is thereby the display of valuable information of the actual measuring process. The proposed method is a non-contact technique, especially applicable to objects with freeform and a size up to 5 m times 5 m times 3 m. The method achieves an accuracy value of less than 1 mm within the whole measuring volume.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper a new interactive digitization concept for large real world objects is described using the virtual reality (VR) environment Elbe Dom. 
The method combines augmented reality (AR) technologies with a high quality display of textured three dimensional (3D) models. One focus is thereby the display of valuable information of the actual measuring process. The proposed method is a non-contact technique, especially applicable to objects with freeform and a size up to 5 m times 5 m times 3 m. The method achieves an accuracy value of less than 1 mm within the whole measuring volume.", "fno": "04811043", "keywords": [ "Augmented Reality", "Computer Displays", "Image Texture", "Solid Modelling", "Virtual Reality", "Augmented Reality", "3 D Noncontact Model Digitalization", "Interactive Digitization", "Large Real World Object", "High Quality Display", "Textured Three Dimensional Model", "Virtual Reality", "Optical Sensors", "Target Tracking", "Displays", "Sensor Systems", "Optical Distortion", "Calibration", "Geometrical Optics", "Volume Measurement", "Information Geometry", "3 D Model Generation", "VR Display", "AR", "Tracking", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial", "Augmented", "And Virtual Realities", "I 4 1 Image Processing And Computer Vision Digitization And Image Capture Scanning" ], "authors": [ { "affiliation": "Fraunhofer Institute for Factory Operation and Automation email: wolfram.schoor@iff.fraunhofer.de", "fullName": "Wolfram Schoor", "givenName": "Wolfram", "surname": "Schoor", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Factory Operation and Automation email: steffen.masik@iff.fraunhofer.de", "fullName": "Steffen Masik", "givenName": "Steffen", "surname": "Masik", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Factory Operation and Automation email: johannes.tuemler@iff.fraunhofer.de", "fullName": "Johannes Tumler", "givenName": "Johannes", "surname": "Tumler", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Factory Operation and Automation 
email: simon.adler@iff.fraunhofer.de", "fullName": "Simon Adler", "givenName": "Simon", "surname": "Adler", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Factory Operation and Automation email: marc.hofmann@iff.fraunhofer.de", "fullName": "Marc Hofmann", "givenName": "Marc", "surname": "Hofmann", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Factory Operation and Automation email: erik.trostmann@iff.fraunhofer.de", "fullName": "Erik Trostmann", "givenName": "Erik", "surname": "Trostmann", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-03-01T00:00:00", "pubType": "proceedings", "pages": "257-258", "year": "2009", "issn": "1087-8270", "isbn": "978-1-4244-3943-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04811042", "articleId": "12OmNy7yEfO", "__typename": "AdjacentArticleType" }, "next": { "fno": "04811044", "articleId": "12OmNx7ouYf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2010/9343/0/05643561", "title": "Effects of a retroreflective screen on depth perception in a head-mounted projection display", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643561/12OmNB9bvby", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1988/0852/0/00012340", "title": "Camera calibration methodology based on a linear perspective transformation error model", "doi": null, "abstractUrl": "/proceedings-article/robot/1988/00012340/12OmNwpXRTg", "parentPublication": { "id": "proceedings/robot/1988/0852/0", "title": "Proceedings. 
1988 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a109", "title": "[POSTER] Fusion of Unsynchronized Optical Tracker and Inertial Sensor in EKF Framework for In-car Augmented Reality Delay Reduction", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a109/12OmNxQOjHO", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1988/0852/0/00012182", "title": "Calibration of theodolites", "doi": null, "abstractUrl": "/proceedings-article/robot/1988/00012182/12OmNyRPgEU", "parentPublication": { "id": "proceedings/robot/1988/0852/0", "title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699263", "title": "Design and Calibration of an Augmented Reality Haploscope", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699263/19F1OYkEmWs", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699319", "title": "Effect of Navigation Speed and VR Devices on Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699319/19F1OrW6KxW", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a628", "title": "A Binocular Model to Evaluate User Experience in Ophthalmic and AR Prescription Lens Designs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a628/1J7WmUiV2la", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/03/09186170", "title": "Parallax Free Registration for Augmented Reality Optical See-Through Displays in the Peripersonal Space", "doi": null, "abstractUrl": "/journal/tg/2022/03/09186170/1mP2AYgyLQY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09384477", "title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array", "doi": null, "abstractUrl": "/journal/tg/2021/05/09384477/1scDuWhBPY4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09490310", "title": "Shedding Light on Cast Shadows: An Investigation of Perceived Ground Contact in AR and VR", "doi": null, "abstractUrl": "/journal/tg/2022/12/09490310/1vmGThNh9jq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgIE9xnBC", "doi": "10.1109/VR55154.2023.00065", "title": "Virtual Optical Bench: Teaching Spherical Lens Layout in VR with Real-Time Ray Tracing", "normalizedTitle": "Virtual Optical Bench: Teaching Spherical Lens Layout in VR with Real-Time Ray Tracing", "abstract": "Teaching in optical systems design is usually performed on an optical bench. While experimentation plays an important role in education, experiments involving expensive or dangerous components are usually limited to short, heavily supervised sessions. Computer simulations, on the other hand, offer high accessibility, but suffer from reduced realism and tangibility when presented on a 2D screen. For this reason, we present the virtual optical bench, an application that lets users explore spherical lens layouts in virtual reality (VR). We implemented a numerically accurate simulation of optical systems using Nvidia OptiX, as well as a prototypical VR application, which we then evaluated in an expert review with 6 optics experts. Based on their feedback, we re-implemented our VR application in Unreal Engine 4. The re-implementation has since been actively used for teaching optical layouts, where we performed a qualitative evaluation with 18 students. We show that our virtual optical bench achieves good usability and is perceived to enhance the understanding of course contents.", "abstracts": [ { "abstractType": "Regular", "content": "Teaching in optical systems design is usually performed on an optical bench. While experimentation plays an important role in education, experiments involving expensive or dangerous components are usually limited to short, heavily supervised sessions. 
Computer simulations, on the other hand, offer high accessibility, but suffer from reduced realism and tangibility when presented on a 2D screen. For this reason, we present the virtual optical bench, an application that lets users explore spherical lens layouts in virtual reality (VR). We implemented a numerically accurate simulation of optical systems using Nvidia OptiX, as well as a prototypical VR application, which we then evaluated in an expert review with 6 optics experts. Based on their feedback, we re-implemented our VR application in Unreal Engine 4. The re-implementation has since been actively used for teaching optical layouts, where we performed a qualitative evaluation with 18 students. We show that our virtual optical bench achieves good usability and is perceived to enhance the understanding of course contents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Teaching in optical systems design is usually performed on an optical bench. While experimentation plays an important role in education, experiments involving expensive or dangerous components are usually limited to short, heavily supervised sessions. Computer simulations, on the other hand, offer high accessibility, but suffer from reduced realism and tangibility when presented on a 2D screen. For this reason, we present the virtual optical bench, an application that lets users explore spherical lens layouts in virtual reality (VR). We implemented a numerically accurate simulation of optical systems using Nvidia OptiX, as well as a prototypical VR application, which we then evaluated in an expert review with 6 optics experts. Based on their feedback, we re-implemented our VR application in Unreal Engine 4. The re-implementation has since been actively used for teaching optical layouts, where we performed a qualitative evaluation with 18 students. 
We show that our virtual optical bench achieves good usability and is perceived to enhance the understanding of course contents.", "fno": "481500a503", "keywords": [ "Solid Modeling", "Optical Design", "Optical Feedback", "Education", "Layout", "User Interfaces", "Adaptive Optics" ], "authors": [ { "affiliation": "Visual Computing Institute, RWTH Aachen University", "fullName": "Martin Bellgardt", "givenName": "Martin", "surname": "Bellgardt", "__typename": "ArticleAuthorType" }, { "affiliation": "Visual Computing Institute, RWTH Aachen University", "fullName": "Sebastian Pape", "givenName": "Sebastian", "surname": "Pape", "__typename": "ArticleAuthorType" }, { "affiliation": "Visual Computing Institute, RWTH Aachen University", "fullName": "David Gilbert", "givenName": "David", "surname": "Gilbert", "__typename": "ArticleAuthorType" }, { "affiliation": "RWTH Aachen University,Chair for Technology of Optical Systems", "fullName": "Marcel Prochnau", "givenName": "Marcel", "surname": "Prochnau", "__typename": "ArticleAuthorType" }, { "affiliation": "RWTH Aachen University,Chair for Technology of Optical Systems", "fullName": "Georg König", "givenName": "Georg", "surname": "König", "__typename": "ArticleAuthorType" }, { "affiliation": "Visual Computing Institute, RWTH Aachen University", "fullName": "Torsten W. 
Kuhlen", "givenName": "Torsten W.", "surname": "Kuhlen", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "503-508", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgHLX0kFO", "name": "pvr202348150-010108451s1-mm_481500a503.zip", "size": "198 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108451s1-mm_481500a503.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "481500a493", "articleId": "1MNgLMRvOtq", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a509", "articleId": "1MNgqqS7YI0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iri/2014/5880/0/07051914", "title": "Towards ray optics formalization of optical imaging systems", "doi": null, "abstractUrl": "/proceedings-article/iri/2014/07051914/12OmNvq5jzp", "parentPublication": { "id": "proceedings/iri/2014/5880/0", "title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/memsys/1997/3744/0/00581762", "title": "Microactuated micro-XYZ stages for free-space micro-optical bench", "doi": null, "abstractUrl": "/proceedings-article/memsys/1997/00581762/12OmNwkR5Cb", "parentPublication": { "id": "proceedings/memsys/1997/3744/0", "title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. 
An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bwcca/2014/4173/0/4173a371", "title": "Optical Ray Tracing Based on Dijkstra Algorithm in Inhomogeneous Medium", "doi": null, "abstractUrl": "/proceedings-article/bwcca/2014/4173a371/12OmNzXFozK", "parentPublication": { "id": "proceedings/bwcca/2014/4173/0", "title": "2014 Ninth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09760161", "title": "Predicting Subjective Discomfort Associated with Lens Distortion in VR Headsets During Vestibulo-Ocular Response to VR Scenes", "doi": null, "abstractUrl": "/journal/tg/5555/01/09760161/1CHsCvUiJQA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a350", "title": "Exploring Presence, Avatar Embodiment, and Body Perception with a Holographic Augmented Reality Mirror", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a350/1CJcn3q3J5K", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10050417", "title": "Perceptually-guided Dual-mode Virtual Reality System For Motion-adaptive Display", "doi": null, "abstractUrl": "/journal/tg/2023/05/10050417/1KYoAIZA47e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798209", "title": "Enactive 
Approach to Assess Perceived Speed Error during Walking and Running in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798209/1cI6auzeLYY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09384477", "title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array", "doi": null, "abstractUrl": "/journal/tg/2021/05/09384477/1scDuWhBPY4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2021/3892/0/389200a628", "title": "Study on spherical aberration in the laser optical system", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2021/389200a628/1t2n9aXMNPO", "parentPublication": { "id": "proceedings/icmtma/2021/3892/0", "title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a635", "title": "Virtual Optical Bench: A VR learning tool for optical design", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a635/1tnXjn7B6WQ", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ12M9tKM0", "doi": "10.1109/VR.2019.8798107", "title": "Gaze-Dependent Distortion Correction for Thick Lenses in HMDs", "normalizedTitle": "Gaze-Dependent Distortion Correction for Thick Lenses in HMDs", "abstract": "Common VR headsets require lenses that increase the field of view and allow the user to focus the display. In most systems, thick lenses are used that generate strong pincushion distortions. To account for this, the content is warped by a corresponding barrel distortion before displaying, resulting in undistorted images for the viewer. This approach assumes that the eye is exactly positioned, typically on the optical axis of the lens. However, in real systems the eye's location deviates from this optimal position even at rest; moreover, the pupil - and thus the optical center of the eye - moves by several millimeters when the user looks around. Thus, eye movement results in additional distortion, which is ignored in current VR headsets. Also in literature on head-mounted displays, the effect is most often not considered, or at least badly documented. The contribution of this paper are experiments that emphasize the importance of this mostly ignored effect. To this end, we have built a simple setup with a camera at variable eye positions in a standard VR headset, that allows us to directly measure the variation of distortion during eye movement. Our experiments show that distortion varies by several dozens of pixels within the full range of eye movements, which emphasizes that the effect is definitely significant. 
We also demonstrate how in a headset with built-in eye tracker, the knowledge of the eye position can be used to achieve a view-dependent lens correction with only minimal additional effort at run-time.", "abstracts": [ { "abstractType": "Regular", "content": "Common VR headsets require lenses that increase the field of view and allow the user to focus the display. In most systems, thick lenses are used that generate strong pincushion distortions. To account for this, the content is warped by a corresponding barrel distortion before displaying, resulting in undistorted images for the viewer. This approach assumes that the eye is exactly positioned, typically on the optical axis of the lens. However, in real systems the eye's location deviates from this optimal position even at rest; moreover, the pupil - and thus the optical center of the eye - moves by several millimeters when the user looks around. Thus, eye movement results in additional distortion, which is ignored in current VR headsets. Also in literature on head-mounted displays, the effect is most often not considered, or at least badly documented. The contribution of this paper are experiments that emphasize the importance of this mostly ignored effect. To this end, we have built a simple setup with a camera at variable eye positions in a standard VR headset, that allows us to directly measure the variation of distortion during eye movement. Our experiments show that distortion varies by several dozens of pixels within the full range of eye movements, which emphasizes that the effect is definitely significant. We also demonstrate how in a headset with built-in eye tracker, the knowledge of the eye position can be used to achieve a view-dependent lens correction with only minimal additional effort at run-time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Common VR headsets require lenses that increase the field of view and allow the user to focus the display. 
In most systems, thick lenses are used that generate strong pincushion distortions. To account for this, the content is warped by a corresponding barrel distortion before displaying, resulting in undistorted images for the viewer. This approach assumes that the eye is exactly positioned, typically on the optical axis of the lens. However, in real systems the eye's location deviates from this optimal position even at rest; moreover, the pupil - and thus the optical center of the eye - moves by several millimeters when the user looks around. Thus, eye movement results in additional distortion, which is ignored in current VR headsets. Also in literature on head-mounted displays, the effect is most often not considered, or at least badly documented. The contribution of this paper are experiments that emphasize the importance of this mostly ignored effect. To this end, we have built a simple setup with a camera at variable eye positions in a standard VR headset, that allows us to directly measure the variation of distortion during eye movement. Our experiments show that distortion varies by several dozens of pixels within the full range of eye movements, which emphasizes that the effect is definitely significant. 
We also demonstrate how in a headset with built-in eye tracker, the knowledge of the eye position can be used to achieve a view-dependent lens correction with only minimal additional effort at run-time.", "fno": "08798107", "keywords": [ "Cameras", "Eye", "Gaze Tracking", "Helmet Mounted Displays", "Lenses", "Optical Distortion", "Virtual Reality", "Eye Movement", "HMD", "Pincushion Distortions", "Barrel Distortion", "Field Of View", "Optical Center", "Optical Axis", "Undistorted Images", "Gaze Dependent Distortion Correction", "View Dependent Lens Correction", "Eye Position", "Eye Tracker", "Standard VR Headset", "Variable Eye Positions", "Head Mounted Displays", "Distortion", "Optical Distortion", "Lenses", "Adaptive Optics", "Distortion Measurement", "Cameras", "Optical Imaging" ], "authors": [ { "affiliation": "University of Erlangen-Nuremberg", "fullName": "Jonathan Martschinke", "givenName": "Jonathan", "surname": "Martschinke", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Erlangen-Nuremberg", "fullName": "Jana Martschinke", "givenName": "Jana", "surname": "Martschinke", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Erlangen-Nuremberg", "fullName": "Marc Stamminger", "givenName": "Marc", "surname": "Stamminger", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Erlangen-Nuremberg", "fullName": "Frank Bauer", "givenName": "Frank", "surname": "Bauer", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1848-1851", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798030", "articleId": "1cJ1dsOkvw4", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798297", "articleId": "1cJ13JSUePK", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2009/4442/0/05457553", "title": "Krill-eye : Superposition compound eye for wide-angle imaging via GRIN lenses", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457553/12OmNAtK4gM", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d867", "title": "Parameter-Free Lens Distortion Calibration of Central Cameras", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d867/12OmNB1eJyc", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549353", "title": "A robust camera-based method for optical distortion calibration of head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549353/12OmNwvVrHy", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a612", "title": "Self-Calibration of Optical Lenses", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a612/12OmNyQ7FPm", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504757", "title": "Combining eye tracking with optimizations for lens astigmatism in modern wide-angle HMDs", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2016/07504757/12OmNySG3Vp", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07064856", "title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/08/i1309", "title": "Parameter-Free Radial Distortion Correction with Center of Distortion Estimation", "doi": null, "abstractUrl": "/journal/tp/2007/08/i1309/13rRUxASuqt", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09760161", "title": "Predicting Subjective Discomfort Associated with Lens Distortion in VR Headsets During Vestibulo-Ocular Response to VR Scenes", "doi": null, "abstractUrl": "/journal/tg/5555/01/09760161/1CHsCvUiJQA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09384477", "title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and 
Lenslet Array", "doi": null, "abstractUrl": "/journal/tg/2021/05/09384477/1scDuWhBPY4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pysw2tXYOY", "doi": "10.1109/ISMAR50242.2020.00027", "title": "Optical distortions in VR bias the perceived slant of moving surfaces", "normalizedTitle": "Optical distortions in VR bias the perceived slant of moving surfaces", "abstract": "The magnifying optics of virtual reality (VR) head-mounted displays (HMD) often cause undesirable pincushion distortion in the displayed imagery. Eccentrically increasing magnification radially displaces image-points away from the optical axis, causing straight lines to curve outwards. This, in turn, should affect the 3D perception of surface shape by warping binocular and monocular depth cues. Previous research has shown that distortion-induced biases in perceived slant do occur in static images. However, most use cases in VR involve moving images. Here we evaluate the impact of motion on biases in perceived slant. An HMD was used to present flat, textured surfaces that varied in slant and were either stationary, or translated laterally by the observer. In separate studies we varied the degree of distortion and evaluated the impact on perceived slant at several locations along the surface. We found that, irrespective of whether the surface was moving or stationary, distortion introduced significant bias into local slant estimates. The pattern of results is consistent with the surface appearing to be concave (as if viewing the inside surface of a bowl), as predicted from the warping of binocular and monocular cues. Importantly, the intermediate distortion level produced the same, but weaker, pattern of biases seen in the fully-distorted condition. When an appropriate level of pre-warping was applied, slant perception was veridical. 
Overall, our results highlight the importance of sufficiently correcting for optical distortions in VR HMDs to enable veridical perception of surface attitude.", "abstracts": [ { "abstractType": "Regular", "content": "The magnifying optics of virtual reality (VR) head-mounted displays (HMD) often cause undesirable pincushion distortion in the displayed imagery. Eccentrically increasing magnification radially displaces image-points away from the optical axis, causing straight lines to curve outwards. This, in turn, should affect the 3D perception of surface shape by warping binocular and monocular depth cues. Previous research has shown that distortion-induced biases in perceived slant do occur in static images. However, most use cases in VR involve moving images. Here we evaluate the impact of motion on biases in perceived slant. An HMD was used to present flat, textured surfaces that varied in slant and were either stationary, or translated laterally by the observer. In separate studies we varied the degree of distortion and evaluated the impact on perceived slant at several locations along the surface. We found that, irrespective of whether the surface was moving or stationary, distortion introduced significant bias into local slant estimates. The pattern of results is consistent with the surface appearing to be concave (as if viewing the inside surface of a bowl), as predicted from the warping of binocular and monocular cues. Importantly, the intermediate distortion level produced the same, but weaker, pattern of biases seen in the fully-distorted condition. When an appropriate level of pre-warping was applied, slant perception was veridical. 
Overall, our results highlight the importance of sufficiently correcting for optical distortions in VR HMDs to enable veridical perception of surface attitude.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The magnifying optics of virtual reality (VR) head-mounted displays (HMD) often cause undesirable pincushion distortion in the displayed imagery. Eccentrically increasing magnification radially displaces image-points away from the optical axis, causing straight lines to curve outwards. This, in turn, should affect the 3D perception of surface shape by warping binocular and monocular depth cues. Previous research has shown that distortion-induced biases in perceived slant do occur in static images. However, most use cases in VR involve moving images. Here we evaluate the impact of motion on biases in perceived slant. An HMD was used to present flat, textured surfaces that varied in slant and were either stationary, or translated laterally by the observer. In separate studies we varied the degree of distortion and evaluated the impact on perceived slant at several locations along the surface. We found that, irrespective of whether the surface was moving or stationary, distortion introduced significant bias into local slant estimates. The pattern of results is consistent with the surface appearing to be concave (as if viewing the inside surface of a bowl), as predicted from the warping of binocular and monocular cues. Importantly, the intermediate distortion level produced the same, but weaker, pattern of biases seen in the fully-distorted condition. When an appropriate level of pre-warping was applied, slant perception was veridical. 
Overall, our results highlight the importance of sufficiently correcting for optical distortions in VR HMDs to enable veridical perception of surface attitude.", "fno": "850800a073", "keywords": [ "Helmet Mounted Displays", "Image Texture", "Optical Distortion", "Virtual Reality", "Visual Perception", "Optical Distortions", "VR Bias", "Perceived Slant", "Magnifying Optics", "Virtual Reality Head Mounted Displays", "HMD", "Undesirable Pincushion Distortion", "Displayed Imagery", "Image Points", "Optical Axis", "Surface Shape", "Warping Binocular Depth Cues", "Monocular Depth Cues", "Distortion Induced Biases", "Static Images", "Textured Surfaces", "Significant Bias", "Local Slant Estimates", "Binocular Cues", "Monocular Cues", "Intermediate Distortion Level", "Fully Distorted Condition", "Slant Perception", "VR HMD", "Surface Attitude", "Visualization", "Three Dimensional Displays", "Optical Distortion", "Resists", "Distortion", "Predistortion", "Lenses", "Human Centered Computing", "Virtual Reality", "Human Centered Computing", "Empirical Studies In HCI", "Computing Methodologies", "Perception", "Computing Methodologies", "Virtual Reality" ], "authors": [ { "affiliation": "York University,Centre for Vision Research,Department of Psychology,Toronto,Ontario,Canada", "fullName": "Jonathan Tong", "givenName": "Jonathan", "surname": "Tong", "__typename": "ArticleAuthorType" }, { "affiliation": "York University,Centre for Vision Research,Department of Electrical Engineering and Computer Science,Toronto,Ontario,Canada", "fullName": "Robert S. Allison", "givenName": "Robert S.", "surname": "Allison", "__typename": "ArticleAuthorType" }, { "affiliation": "York University,Centre for Vision Research,Department of Psychology,Toronto,Ontario,Canada", "fullName": "Laurie M. 
Wilcox", "givenName": "Laurie M.", "surname": "Wilcox", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "73-79", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "850800a064", "articleId": "1pysxPMqyTm", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a080", "articleId": "1pysvYTZF6w", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/1992/2855/0/00223201", "title": "Correcting chromatic aberrations using image warping", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223201/12OmNAXxXiJ", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2018/2290/0/08343085", "title": "A transformation analysis of 3D virtual object for projection mapping", "doi": null, "abstractUrl": "/proceedings-article/icoin/2018/08343085/12OmNvSbBz8", "parentPublication": { "id": "proceedings/icoin/2018/2290/0", "title": "2018 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549353", "title": "A robust camera-based method for optical distortion calibration of head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549353/12OmNwvVrHy", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tp/2017/03/07448905", "title": "Triangulation in Random Refractive Distortions", "doi": null, "abstractUrl": "/journal/tp/2017/03/07448905/13rRUILc8gs", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/12/08462767", "title": "Material Classification from Time-of-Flight Distortions", "doi": null, "abstractUrl": "/journal/tp/2019/12/08462767/13w3lplmyhq", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200e987", "title": "Learning to Remove Refractive Distortions from Underwater Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200e987/1BmEYs8Sthu", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09760161", "title": "Predicting Subjective Discomfort Associated with Lens Distortion in VR Headsets During Vestibulo-Ocular Response to VR Scenes", "doi": null, "abstractUrl": "/journal/tg/5555/01/09760161/1CHsCvUiJQA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798107", "title": "Gaze-Dependent Distortion Correction for Thick Lenses in HMDs", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798107/1cJ12M9tKM0", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2020/1969/0/196900a092", "title": "Nondestructive testing method for surface defects of mechanical parts based on machine vision", "doi": null, "abstractUrl": "/proceedings-article/icris/2020/196900a092/1wG5YNnK8TK", "parentPublication": { "id": "proceedings/icris/2020/1969/0", "title": "2020 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "18M7cVguDNm", "title": "2019 Third IEEE International Conference on Robotic Computing (IRC)", "acronym": "irc", "groupId": "1819925", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "18M7ddCK7o4", "doi": "10.1109/IRC.2019.00036", "title": "Markerless Racket Pose Detection and Stroke Classification Based on Stereo Vision for Table Tennis Robots", "normalizedTitle": "Markerless Racket Pose Detection and Stroke Classification Based on Stereo Vision for Table Tennis Robots", "abstract": "For table tennis robots, it is a significant challenge to understand the opponent's movements and return the ball accordingly with high performance. One has to cope with various ball speeds and spins resulting from different stroke types. In this paper, we propose a real-time 3D racket pose detection method and classify racket movements into five stroke categories with a neural network. By using two monocular cameras, we can extract the racket's contours and choose some special points as feature points in image coordinates. With the 3D geometrical information of a racket, a wide baseline stereo matching method is proposed to find the corresponding feature points and compute the 3D position and orientation of the racket by triangulation and plane fitting. Then, a Kalman filter is adopted to track the racket pose, and a neural network with two hidden layers is used to classify the pose movements. We conduct two experiments to evaluate the accuracy of racket pose detection and classification, in which the average error in position and orientation is around 7.8 mm and 7.2° by comparing with the ground truth from a KUKA robot. 
The classification accuracy is 98%, the same as the human pose estimation method with Convolutional Pose Machines (CPMs).", "abstracts": [ { "abstractType": "Regular", "content": "For table tennis robots, it is a significant challenge to understand the opponent's movements and return the ball accordingly with high performance. One has to cope with various ball speeds and spins resulting from different stroke types. In this paper, we propose a real-time 3D racket pose detection method and classify racket movements into five stroke categories with a neural network. By using two monocular cameras, we can extract the racket's contours and choose some special points as feature points in image coordinates. With the 3D geometrical information of a racket, a wide baseline stereo matching method is proposed to find the corresponding feature points and compute the 3D position and orientation of the racket by triangulation and plane fitting. Then, a Kalman filter is adopted to track the racket pose, and a neural network with two hidden layers is used to classify the pose movements. We conduct two experiments to evaluate the accuracy of racket pose detection and classification, in which the average error in position and orientation is around 7.8 mm and 7.2° by comparing with the ground truth from a KUKA robot. The classification accuracy is 98%, the same as the human pose estimation method with Convolutional Pose Machines (CPMs).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For table tennis robots, it is a significant challenge to understand the opponent's movements and return the ball accordingly with high performance. One has to cope with various ball speeds and spins resulting from different stroke types. In this paper, we propose a real-time 3D racket pose detection method and classify racket movements into five stroke categories with a neural network. 
By using two monocular cameras, we can extract the racket's contours and choose some special points as feature points in image coordinates. With the 3D geometrical information of a racket, a wide baseline stereo matching method is proposed to find the corresponding feature points and compute the 3D position and orientation of the racket by triangulation and plane fitting. Then, a Kalman filter is adopted to track the racket pose, and a neural network with two hidden layers is used to classify the pose movements. We conduct two experiments to evaluate the accuracy of racket pose detection and classification, in which the average error in position and orientation is around 7.8 mm and 7.2° by comparing with the ground truth from a KUKA robot. The classification accuracy is 98%, the same as the human pose estimation method with Convolutional Pose Machines (CPMs).", "fno": "924500a189", "keywords": [ "Cameras", "Feature Extraction", "Image Classification", "Image Matching", "Kalman Filters", "Pose Estimation", "Robot Vision", "Sport", "Stereo Image Processing", "Convolutional Pose Machines", "Feature Points", "Wide Baseline Stereo Matching", "Markerless Racket Pose Detection", "Real Time 3 D Racket Pose Detection", "Monocular Cameras", "Image Coordinates", "Plane Fitting", "Kalman Filter", "Racket Pose Classification", "KUKA Robot", "Human Pose Estimation", "Pose Movements", "3 D Geometrical Information", "Neural Network", "Stroke Categories", "Racket Movements", "Table Tennis Robots", "Stereo Vision", "Stroke Classification", "Feature Extraction", "Cameras", "Robot Kinematics", "Three Dimensional Displays", "Sports", "Sports Equipment", "Racket Pose Detection Pose Classification Stereo Matching Table Tennis Robot" ], "authors": [ { "affiliation": null, "fullName": "Yapeng Gao", "givenName": "Yapeng", "surname": "Gao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jonas Tebbe", "givenName": "Jonas", "surname": "Tebbe", "__typename": 
"ArticleAuthorType" }, { "affiliation": null, "fullName": "Julian Krismer", "givenName": "Julian", "surname": "Krismer", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Andreas Zell", "givenName": "Andreas", "surname": "Zell", "__typename": "ArticleAuthorType" } ], "idPrefix": "irc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-02-01T00:00:00", "pubType": "proceedings", "pages": "189-196", "year": "2019", "issn": null, "isbn": "978-1-5386-9245-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "924500a183", "articleId": "18M7gauFRT2", "__typename": "AdjacentArticleType" }, "next": { "fno": "924500a560", "articleId": "18M7eeB8Bxu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/is3c/2016/3071/0/3071a362", "title": "An Intelligent Tennis Ball Collecting Vehicle Using Smart Phone Touch-Based Interface", "doi": null, "abstractUrl": "/proceedings-article/is3c/2016/3071a362/12OmNBTJIAG", "parentPublication": { "id": "proceedings/is3c/2016/3071/0", "title": "2016 International Symposium on Computer, Consumer and Control (IS3C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2010/7846/0/05571164", "title": "Real-Time Immersive Table Tennis Game for Two Players with Motion Tracking", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571164/12OmNyeECD6", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/case/2011/1732/0/06042396", "title": "A vision system with multiple cameras designed for humanoid robots to play table tennis", "doi": null, "abstractUrl": 
"/proceedings-article/case/2011/06042396/12OmNzuZUD6", "parentPublication": { "id": "proceedings/case/2011/1732/0", "title": "2011 IEEE International Conference on Automation Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2018/2666/1/266601a783", "title": "Painless Tennis Ball Tracking System", "doi": null, "abstractUrl": "/proceedings-article/compsac/2018/266601a783/144U9aHWXpJ", "parentPublication": { "id": "proceedings/compsac/2018/2666/2", "title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000b839", "title": "Convolutional Neural Networks Based Ball Detection in Tennis Games", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000b839/17D45Xh13wp", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiars/2022/5457/0/545700a135", "title": "Intelligent Repair System of Table Tennis Server Based on Data Analysis Algorithm", "doi": null, "abstractUrl": "/proceedings-article/aiars/2022/545700a135/1J2XPikx7b2", "parentPublication": { "id": "proceedings/aiars/2022/5457/0", "title": "2022 International Conference on Artificial Intelligence and Autonomous Robot Systems (AIARS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382892", "title": "SPinPong - Virtual Reality Table Tennis Skill Acquisition using Visual, Haptic and Temporal Cues", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382892/1saZrRoiA3C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412391", "title": "Extraction and analysis of 3D kinematic parameters of Table Tennis ball from a single camera", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412391/1tmi56Q22Ck", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900e571", "title": "Table Tennis Stroke Recognition Using Two-Dimensional Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900e571/1yJYs5Lr65W", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmu/2021/48/0/09638855", "title": "Toward the Perfect Stroke: A Multimodal Approach for Table Tennis Stroke Evaluation", "doi": null, "abstractUrl": "/proceedings-article/icmu/2021/09638855/1zktfg0C87u", "parentPublication": { "id": "proceedings/icmu/2021/48/0", "title": "2021 Thirteenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7Wlv8mvKM", "doi": "10.1109/ISMAR-Adjunct57072.2022.00182", "title": "Virtual Reality Sonification Training System Can Improve a Novice's Forehand Return of Serve in Tennis", "normalizedTitle": "Virtual Reality Sonification Training System Can Improve a Novice's Forehand Return of Serve in Tennis", "abstract": "Virtual reality (VR) is gaining interest as a platform for sports skills training. VR allows for information manipulation and feedback that would be difficult in reality. This is particularly useful in open skill sports where players must adjust their behavior in response to environmental factors. Auditory feedback (sonification) is constructive for sports training in VR. However, this has not been well studied in open skill-specific situations due to the difficulty of accounting for environmental factors in reality. This study focuses on a serve return, an important scene in tennis. It investigates the effects of sonification on the forehand return stroke in VR by comparing score displays and trajectory visualizations. We designed the sonification based on the difference between experienced and novice players' movements in VR. We then conducted a between-subjects experiment to investigate the effect of the sonification (N=20). The results showed that the system with sonification effectively improved the timing of hip movement for preparing a slow serve return compared to the system without sonification.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality (VR) is gaining interest as a platform for sports skills training. VR allows for information manipulation and feedback that would be difficult in reality. 
This is particularly useful in open skill sports where players must adjust their behavior in response to environmental factors. Auditory feedback (sonification) is constructive for sports training in VR. However, this has not been well studied in open skill-specific situations due to the difficulty of accounting for environmental factors in reality. This study focuses on a serve return, an important scene in tennis. It investigates the effects of sonification on the forehand return stroke in VR by comparing score displays and trajectory visualizations. We designed the sonification based on the difference between experienced and novice players' movements in VR. We then conducted a between-subjects experiment to investigate the effect of the sonification (N=20). The results showed that the system with sonification effectively improved the timing of hip movement for preparing a slow serve return compared to the system without sonification.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality (VR) is gaining interest as a platform for sports skills training. VR allows for information manipulation and feedback that would be difficult in reality. This is particularly useful in open skill sports where players must adjust their behavior in response to environmental factors. Auditory feedback (sonification) is constructive for sports training in VR. However, this has not been well studied in open skill-specific situations due to the difficulty of accounting for environmental factors in reality. This study focuses on a serve return, an important scene in tennis. It investigates the effects of sonification on the forehand return stroke in VR by comparing score displays and trajectory visualizations. We designed the sonification based on the difference between experienced and novice players' movements in VR. We then conducted a between-subjects experiment to investigate the effect of the sonification (N=20). 
The results showed that the system with sonification effectively improved the timing of hip movement for preparing a slow serve return compared to the system without sonification.", "fno": "536500a845", "keywords": [ "Audio Signal Processing", "Audio User Interfaces", "Biomechanics", "Data Visualisation", "Graphical User Interfaces", "Hearing", "Psychology", "Sport", "Virtual Reality", "Auditory Feedback", "Environmental Factors", "Forehand Return Stroke", "Information Manipulation", "Open Skill Sports", "Open Skill Specific Situations", "Slow Serve Return", "Sports Skills Training", "Sports Training", "Tennis", "Virtual Reality Sonification Training System", "VR", "Training", "Human Computer Interaction", "Virtual Environments", "Data Visualization", "Sonification", "Environmental Factors", "Trajectory", "Human Centered Computing Virtual Reality", "Human Centered Computing Empirical Studies In HCI" ], "authors": [ { "affiliation": "Keio University,NII Communication Science Laboratories", "fullName": "Katsutoshi Masai", "givenName": "Katsutoshi", "surname": "Masai", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University", "fullName": "Takuma Kajiyama", "givenName": "Takuma", "surname": "Kajiyama", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University", "fullName": "Tadashi Muramatsu", "givenName": "Tadashi", "surname": "Muramatsu", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University", "fullName": "Maki Sugimoto", "givenName": "Maki", "surname": "Sugimoto", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University,NII Communication Science Laboratories", "fullName": "Toshitaka Kimura", "givenName": "Toshitaka", "surname": "Kimura", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "845-849", "year": "2022", "issn": null, 
"isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a839", "articleId": "1J7W82UYOVa", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a850", "articleId": "1J7Ww7fDbsQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cisis/2012/4687/0/4687a699", "title": "A Supporting Method of Medical Imaging Diagnosis with Sonification", "doi": null, "abstractUrl": "/proceedings-article/cisis/2012/4687a699/12OmNvRU0hw", "parentPublication": { "id": "proceedings/cisis/2012/4687/0", "title": "2012 Sixth International Conference on Complex, Intelligent, and Software Intensive Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/3/3336e014", "title": "Study on Application of CAD Sonification", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336e014/12OmNwK7obY", "parentPublication": { "id": "proceedings/csse/2008/3336/3", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2005/2397/0/23970017", "title": "MotionLab Sonify: A Framework for the Sonification of Human Motion Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2005/23970017/12OmNweBUNu", "parentPublication": { "id": "proceedings/iv/2005/2397/0", "title": "Ninth International Conference on Information Visualisation (IV'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/varms-ieeevr/2015/6926/0/07151725", "title": "Enhancing visualization of molecular simulations using sonification", "doi": null, "abstractUrl": "/proceedings-article/varms-ieeevr/2015/07151725/12OmNx0A7CV", "parentPublication": { "id": "proceedings/varms-ieeevr/2015/6926/0", "title": 
"2015 IEEE 1st International Workshop on Virtual and Augmented Reality for Molecular Science (VARMS@IEEEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2015/01/mmu2015010058", "title": "Interactive Sonification in Rowing: Acoustic Feedback for On-Water Training", "doi": null, "abstractUrl": "/magazine/mu/2015/01/mmu2015010058/13rRUxjyX13", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sive/2018/5713/0/08577080", "title": "Quantum: An art-science case study on sonification and sound design in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/sive/2018/08577080/17D45We0UEe", "parentPublication": { "id": "proceedings/sive/2018/5713/0", "title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a804", "title": "Assist Home Training Table Tennis Skill Acquisition via Immersive Learning and Web Technologies", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a804/1CJd0JOwO9a", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382892", "title": "SPinPong - Virtual Reality Table Tennis Skill Acquisition using Visual, Haptic and Temporal Cues", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382892/1saZrRoiA3C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09446582", "title": "Performance Improvement and Skill Transfer in Table 
Tennis Through Training in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/12/09446582/1u8lz4qWghi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a251", "title": "AIive: Interactive Visualization and Sonification of Neural Networks in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a251/1zxLxmIs3xm", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrJiCDn", "title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on", "acronym": "ihmsc", "groupId": "1002959", "volume": "1", "displayVolume": "1", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNzQR1nK", "doi": "10.1109/IHMSC.2010.81", "title": "A Novel Simple 2D Model of Eye Gaze Estimation", "normalizedTitle": "A Novel Simple 2D Model of Eye Gaze Estimation", "abstract": "With the development of image process technology, camera-based eye gaze estimation methods make a possible nonintrusive way of human computer interaction (HCI). Most of them base on Pupil-Cornea Reflection Technique (PCRT) which uses extra light sources and estimates gaze point on screen by a polynomial mapping function. In this paper, the input vector of classical PCRT is adjusted to a new one which can be precisely extracted in the system using only one camera. The adjusted PCRT is called Pupil-Corner Technique (PCT). Meanwhile, a novel simple 2D Geometric Model (GM) is proposed. It bases on the geometric relationship of eyeballs and screen and simulates the movement of eyeballs when user looks through the screen with his head still. Furthermore, a fitting method is used to promote the accuracy of GM. At last, the combined technique called Geometric Fitting Technique (GFT) is compared with PCT in experiments and the results show that GFT has an acceptable accuracy which can be applied in HCI applications and future geometric model is suggested by the guide concluded from the paper to promote accuracy.", "abstracts": [ { "abstractType": "Regular", "content": "With the development of image process technology, camera-based eye gaze estimation methods make a possible nonintrusive way of human computer interaction (HCI). Most of them base on Pupil-Cornea Reflection Technique (PCRT) which uses extra light sources and estimates gaze point on screen by a polynomial mapping function. 
In this paper, the input vector of classical PCRT is adjusted to a new one which can be precisely extracted in the system using only one camera. The adjusted PCRT is called Pupil-Corner Technique (PCT). Meanwhile, a novel simple 2D Geometric Model (GM) is proposed. It bases on the geometric relationship of eyeballs and screen and simulates the movement of eyeballs when user looks through the screen with his head still. Furthermore, a fitting method is used to promote the accuracy of GM. At last, the combined technique called Geometric Fitting Technique (GFT) is compared with PCT in experiments and the results show that GFT has an acceptable accuracy which can be applied in HCI applications and future geometric model is suggested by the guide concluded from the paper to promote accuracy.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the development of image process technology, camera-based eye gaze estimation methods make a possible nonintrusive way of human computer interaction (HCI). Most of them base on Pupil-Cornea Reflection Technique (PCRT) which uses extra light sources and estimates gaze point on screen by a polynomial mapping function. In this paper, the input vector of classical PCRT is adjusted to a new one which can be precisely extracted in the system using only one camera. The adjusted PCRT is called Pupil-Corner Technique (PCT). Meanwhile, a novel simple 2D Geometric Model (GM) is proposed. It bases on the geometric relationship of eyeballs and screen and simulates the movement of eyeballs when user looks through the screen with his head still. Furthermore, a fitting method is used to promote the accuracy of GM. 
At last, the combined technique called Geometric Fitting Technique (GFT) is compared with PCT in experiments and the results show that GFT has an acceptable accuracy which can be applied in HCI applications and future geometric model is suggested by the guide concluded from the paper to promote accuracy.", "fno": "4151a300", "keywords": [ "Eye Gaze Estimation", "Eye Gaze Tracking", "Eye Gaze Model", "Mapping Function", "Human Computer Interaction" ], "authors": [ { "affiliation": null, "fullName": "Guojian Shao", "givenName": "Guojian", "surname": "Shao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ming Che", "givenName": "Ming", "surname": "Che", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bingyi Zhang", "givenName": "Bingyi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kunfang Cen", "givenName": "Kunfang", "surname": "Cen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wei Gao", "givenName": "Wei", "surname": "Gao", "__typename": "ArticleAuthorType" } ], "idPrefix": "ihmsc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-08-01T00:00:00", "pubType": "proceedings", "pages": "300-304", "year": "2010", "issn": null, "isbn": "978-0-7695-4151-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4151a296", "articleId": "12OmNBAqZHZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "4151a305", "articleId": "12OmNzdoMUq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/gcis/2009/3571/2/3571b133", "title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection", "doi": null, "abstractUrl": "/proceedings-article/gcis/2009/3571b133/12OmNA0vo1q", "parentPublication": { "id": "proceedings/gcis/2009/3571/2", 
"title": "2009 WRI Global Congress on Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2011/4367/0/4367a423", "title": "Eye-based HCI with Full Specification of Mouse and Keyboard Using Pupil Knowledge in the Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/itng/2011/4367a423/12OmNAfPISv", "parentPublication": { "id": "proceedings/itng/2011/4367/0", "title": "Information Technology: New Generations, Third International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/1999/0481/0/04810171", "title": "Keeping an Eye for HCI", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/1999/04810171/12OmNAoUT4O", "parentPublication": { "id": "proceedings/sibgrapi/1999/0481/0", "title": "XII Brazilian Symposium on Computer Graphics and Image Processing (Cat. No.PR00481)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995675", "title": "Probabilistic gaze estimation without active personal calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinis/2010/4249/0/4249a048", "title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642", "doi": null, "abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4", "parentPublication": { "id": "proceedings/icinis/2010/4249/0", "title": "Intelligent Networks and Intelligent Systems, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/1/4252a131", "title": "A Simplified 3D Gaze Tracking Technology with Stereo Vision", "doi": null, 
"abstractUrl": "/proceedings-article/icoip/2010/4252a131/12OmNwqft0F", "parentPublication": { "id": "proceedings/icoip/2010/4252/1", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a184", "title": "Online Gaze Disparity via Binocular Eye Tracking on Stereoscopic Displays", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a184/12OmNx6xHm5", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109d870", "title": "Visual Gaze Estimation by Joint Head and Eye Information", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/03/ttp2010030478", "title": "In the Eye of the Beholder: A Survey of Models for Eyes and Gaze", "doi": null, "abstractUrl": "/journal/tp/2010/03/ttp2010030478/13rRUxOdD9o", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09389490", "title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz", "doi": null, "abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgXc", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNBl6EHn", "doi": "10.1109/ICCVW.2011.6130256", "title": "3D environment measurement using binocular stereo and motion stereo by mobile robot with omnidirectional stereo camera", "normalizedTitle": "3D environment measurement using binocular stereo and motion stereo by mobile robot with omnidirectional stereo camera", "abstract": "Map information is important for path planning and selflocalization when mobile robots accomplish autonomous tasks. In unknown environments, they should generate environment maps by themselves. An omnidirectional camera is effective for environment measurement, because it has a wide field of view. There are binocular stereo and motion stereo in traditional methods for measurement by omnidirectional camera. However, each method has advantages and disadvantages. In this paper, we aim to improve measurement accuracy by integrating binocular stereo and motion stereo using two omnidirectional cameras installed on a mobile robot. In addition, stereo matching accuracy is improved by considering omnidirectional image distortion. Experimental results show the effectiveness of our proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "Map information is important for path planning and selflocalization when mobile robots accomplish autonomous tasks. In unknown environments, they should generate environment maps by themselves. An omnidirectional camera is effective for environment measurement, because it has a wide field of view. There are binocular stereo and motion stereo in traditional methods for measurement by omnidirectional camera. However, each method has advantages and disadvantages. 
In this paper, we aim to improve measurement accuracy by integrating binocular stereo and motion stereo using two omnidirectional cameras installed on a mobile robot. In addition, stereo matching accuracy is improved by considering omnidirectional image distortion. Experimental results show the effectiveness of our proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Map information is important for path planning and selflocalization when mobile robots accomplish autonomous tasks. In unknown environments, they should generate environment maps by themselves. An omnidirectional camera is effective for environment measurement, because it has a wide field of view. There are binocular stereo and motion stereo in traditional methods for measurement by omnidirectional camera. However, each method has advantages and disadvantages. In this paper, we aim to improve measurement accuracy by integrating binocular stereo and motion stereo using two omnidirectional cameras installed on a mobile robot. In addition, stereo matching accuracy is improved by considering omnidirectional image distortion. 
Experimental results show the effectiveness of our proposed method.", "fno": "06130256", "keywords": [ "Cameras", "Image Matching", "Mobile Robots", "Motion Estimation", "Path Planning", "Robot Vision", "Stereo Image Processing", "3 D Environment Measurement", "Binocular Stereo", "Motion Stereo", "Mobile Robot", "Omnidirectional Stereo Camera", "Map Information", "Path Planning", "Self Localization", "Measurement Accuracy", "Two Omnidirectional Camera", "Stereo Matching Accuracy", "Omnidirectional Image Distortion", "Motion Measurement", "Cameras", "Lenses" ], "authors": [ { "affiliation": "Department of Mechanical Engineering, Shizuoka University, 3-5-1 Johoku, Naka-ku, Hamamatsu-shi, 432-8561, Japan", "fullName": "Shinichi Goto", "givenName": "Shinichi", "surname": "Goto", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Precision Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, 113-8656, Japan", "fullName": "Atsushi Yamashita", "givenName": "Atsushi", "surname": "Yamashita", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Mechanical Engineering, Shizuoka University, 3-5-1 Johoku, Naka-ku, Hamamatsu-shi, 432-8561, Japan", "fullName": "Ryosuke Kawanishi", "givenName": "Ryosuke", "surname": "Kawanishi", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Mechanical Engineering, Shizuoka University, 3-5-1 Johoku, Naka-ku, Hamamatsu-shi, 432-8561, Japan", "fullName": "Toru Kaneko", "givenName": "Toru", "surname": "Kaneko", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Precision Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, 113-8656, Japan", "fullName": "Hajime Asama", "givenName": "Hajime", "surname": "Asama", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-11-01T00:00:00", "pubType": "proceedings", "pages": "296-303", "year": "2011", 
"issn": null, "isbn": "978-1-4673-0063-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06130255", "articleId": "12OmNzzxuyx", "__typename": "AdjacentArticleType" }, "next": { "fno": "06130257", "articleId": "12OmNAFFdJc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/1993/3880/0/00341143", "title": "Active binocular stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1993/00341143/12OmNqEAT9V", "parentPublication": { "id": "proceedings/cvpr/1993/3880/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206530", "title": "Stereographic rectification of omnidirectional stereo pairs", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206530/12OmNyqzM2C", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/4/07504589", "title": "Real-Time Generation and Presentation of View-Dependent Binocular Stereo Images Using a Sequence of Omnidirectional Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07504589/12OmNz5JC9I", "parentPublication": { "id": "proceedings/icpr/2000/0750/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2010/4040/0/4040a183", "title": "Binocular Camera Calibration Using Rectification Error", "doi": null, "abstractUrl": "/proceedings-article/crv/2010/4040a183/12OmNzSyCbE", "parentPublication": { "id": "proceedings/crv/2010/4040/0", "title": "2010 Canadian Conference on Computer and 
Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/culture-and-computing/2017/1135/0/08227335", "title": "Walk through a Museum with Binocular Stereo Effect and Spherical Panorama Views", "doi": null, "abstractUrl": "/proceedings-article/culture-and-computing/2017/08227335/17D45XtvpdY", "parentPublication": { "id": "proceedings/culture-and-computing/2017/1135/0", "title": "2017 International Conference on Culture and Computing (Culture and Computing)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2962", "title": "Uniform Subdivision of Omnidirectional Camera Space for Efficient Spherical Stereo Matching", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2962/1H1iRK3bg5O", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956197", "title": "On Depth Error from Spherical Camera Calibration within Omnidirectional Stereo Vision", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956197/1IHqnsyNjbO", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icctec/2017/5784/0/578400b093", "title": "Target Detection Based on Binocular Stereo Vision", "doi": null, "abstractUrl": "/proceedings-article/icctec/2017/578400b093/1cks8x4TANG", "parentPublication": { "id": "proceedings/icctec/2017/5784/0", "title": "2017 International Conference on Computer Technology, Electronics and Communication (ICCTEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tp/2021/11/09086445", "title": "End-to-End Learning for Omnidirectional Stereo Matching With Uncertainty Prior", "doi": null, "abstractUrl": "/journal/tp/2021/11/09086445/1jAciRM8xws", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaa/2021/3730/0/373000a081", "title": "Research on Target Ranging Method Based on Binocular Stereo Vision", "doi": null, "abstractUrl": "/proceedings-article/icaa/2021/373000a081/1zL1W1JhPoY", "parentPublication": { "id": "proceedings/icaa/2021/3730/0", "title": "2021 International Conference on Intelligent Computing, Automation and Applications (ICAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwB2dUd", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNC8MsI4", "doi": "10.1109/3DUI.2016.7460070", "title": "Considerations on binocular mismatching in observation-based diminished reality", "normalizedTitle": "Considerations on binocular mismatching in observation-based diminished reality", "abstract": "In this paper, we introduce novel problems of binocular stereo (binocular mismatching) in observation-based diminished reality. To confirm these problems, we simulated an observation-based diminished reality system using a video see-through head-mounted display. We also demonstrated that simple methods can reduce such binocular mismatching.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we introduce novel problems of binocular stereo (binocular mismatching) in observation-based diminished reality. To confirm these problems, we simulated an observation-based diminished reality system using a video see-through head-mounted display. We also demonstrated that simple methods can reduce such binocular mismatching.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we introduce novel problems of binocular stereo (binocular mismatching) in observation-based diminished reality. To confirm these problems, we simulated an observation-based diminished reality system using a video see-through head-mounted display. 
We also demonstrated that simple methods can reduce such binocular mismatching.", "fno": "07460070", "keywords": [ "Three Dimensional Displays", "Rendering Computer Graphics", "Cameras", "Switches", "Fluctuations", "Glass", "Virtual Reality", "Image Based Rendering", "Diminished Reality", "Mixed Reality", "Video See Through Head Mounted Display", "Binocular Stereo" ], "authors": [ { "affiliation": "Ritsumeikan University", "fullName": "Hitomi Matsuki", "givenName": "Hitomi", "surname": "Matsuki", "__typename": "ArticleAuthorType" }, { "affiliation": "Ritsumeikan University", "fullName": "Shohei Mori", "givenName": "Shohei", "surname": "Mori", "__typename": "ArticleAuthorType" }, { "affiliation": "Ritsumeikan University", "fullName": "Sei Ikeda", "givenName": "Sei", "surname": "Ikeda", "__typename": "ArticleAuthorType" }, { "affiliation": "Ritsumeikan University", "fullName": "Fumihisa Shibata", "givenName": "Fumihisa", "surname": "Shibata", "__typename": "ArticleAuthorType" }, { "affiliation": "Ritsumeikan University", "fullName": "Asako Kimura", "givenName": "Asako", "surname": "Kimura", "__typename": "ArticleAuthorType" }, { "affiliation": "Ritsumeikan University", "fullName": "Hideyuki Tamura", "givenName": "Hideyuki", "surname": "Tamura", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "261-262", "year": "2016", "issn": null, "isbn": "978-1-5090-0842-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07460069", "articleId": "12OmNwAKCQd", "__typename": "AdjacentArticleType" }, "next": { "fno": "07460071", "articleId": "12OmNAPjA8q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismarw/2016/3740/0/07836520", "title": "First Deployment of 
Diminished Reality for Anatomy Education", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836520/12OmNAYGlBY", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icie/2009/3679/2/3679b045", "title": "A Depth-Dependent Fusion Algorithm for Enhanced Reality Based on Binocular Vision", "doi": null, "abstractUrl": "/proceedings-article/icie/2009/3679b045/12OmNAlvI0w", "parentPublication": { "id": "proceedings/icie/2009/3679/2", "title": "2009 WASE International Conference on Information Engineering (ICIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/memsys/1997/3744/0/00581869", "title": "Fiberscope-type environmental monitoring devices with binocular parallax accommodation mechanism for stereoscopic observation", "doi": null, "abstractUrl": "/proceedings-article/memsys/1997/00581869/12OmNC0guAb", "parentPublication": { "id": "proceedings/memsys/1997/3744/0", "title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. 
An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a032", "title": "Efficient Use of Textured 3D Model for Pre-observation-based Diminished Reality", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a032/12OmNwdbVbT", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892370", "title": "Diminished hand: A diminished reality-based work area visualization", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892370/12OmNx0RIJt", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2012/4725/0/4725a141", "title": "A Viewpoint about Diminished Reality: Is it Possible Remove Objects in Real Time from Scenes?", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a141/12OmNxEjXZZ", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2008/3259/0/3259a025", "title": "Projection-Based Diminished Reality System", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2008/3259a025/12OmNyXMQft", "parentPublication": { "id": "proceedings/isuvr/2008/3259/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699270", "title": "Towards Mobile Diminished Reality", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2018/08699270/19F1SDkm35e", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a308", "title": "Online Adaptive Integration of Observation and Inpainting for Diminished Reality with Online Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a308/1J7Wkijm8Yo", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a870", "title": "DeclutterAR: Mobile Diminished Reality and Augmented Reality to Address Hoarding by Motivating Decluttering and Selling on Online Marketplace", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a870/1J7WqRKPLO0", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAWpykB", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNqIQSkS", "doi": "10.1109/CVPR.2005.446", "title": "Dynamic Panoramic Surround Map: Motivation and Omni Video Based Approach", "normalizedTitle": "Dynamic Panoramic Surround Map: Motivation and Omni Video Based Approach", "abstract": "Awareness of what surrounds a vehicle directly affects the safe driving and maneuvering of an automobile. Surround information or maps can help in ethnographic studies of driver behavior as well as provide a critical input in the development of effective driver assistance system. In this paper, we introduce the concept of Dynamic Panoramic Surround (DPS) map that shows the nearby surroundings of the vehicle, and detects the objects of importance on the road. Omnidirectional cameras which give a panoramic view of the surroundings can be useful for visualizing and analyzing the nearby surroundings of the vehicle. A novel approach for synthesizing the DPS using stereo and motion analysis of video images from a pair of omni-directional cameras on the vehicle is developed. Successful generation of DPS in experimental runs on an instrumented vehicle testbed is demonstrated. These experiments prove the basic feasibility and show promise of omni video based DPS capture algorithm to provide useful semantic descriptors of the state of moving vehicles and obstacles in the vicinity of a vehicle.", "abstracts": [ { "abstractType": "Regular", "content": "Awareness of what surrounds a vehicle directly affects the safe driving and maneuvering of an automobile. Surround information or maps can help in ethnographic studies of driver behavior as well as provide a critical input in the development of effective driver assistance system. 
In this paper, we introduce the concept of Dynamic Panoramic Surround (DPS) map that shows the nearby surroundings of the vehicle, and detects the objects of importance on the road. Omnidirectional cameras which give a panoramic view of the surroundings can be useful for visualizing and analyzing the nearby surroundings of the vehicle. A novel approach for synthesizing the DPS using stereo and motion analysis of video images from a pair of omni-directional cameras on the vehicle is developed. Successful generation of DPS in experimental runs on an instrumented vehicle testbed is demonstrated. These experiments prove the basic feasibility and show promise of omni video based DPS capture algorithm to provide useful semantic descriptors of the state of moving vehicles and obstacles in the vicinity of a vehicle.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Awareness of what surrounds a vehicle directly affects the safe driving and maneuvering of an automobile. Surround information or maps can help in ethnographic studies of driver behavior as well as provide a critical input in the development of effective driver assistance system. In this paper, we introduce the concept of Dynamic Panoramic Surround (DPS) map that shows the nearby surroundings of the vehicle, and detects the objects of importance on the road. Omnidirectional cameras which give a panoramic view of the surroundings can be useful for visualizing and analyzing the nearby surroundings of the vehicle. A novel approach for synthesizing the DPS using stereo and motion analysis of video images from a pair of omni-directional cameras on the vehicle is developed. Successful generation of DPS in experimental runs on an instrumented vehicle testbed is demonstrated. 
These experiments prove the basic feasibility and show promise of omni video based DPS capture algorithm to provide useful semantic descriptors of the state of moving vehicles and obstacles in the vicinity of a vehicle.", "fno": "237230061", "keywords": [], "authors": [ { "affiliation": "University of California San Diego", "fullName": "Tarak Gandhi", "givenName": "Tarak", "surname": "Gandhi", "__typename": "ArticleAuthorType" }, { "affiliation": "University of California San Diego", "fullName": "Mohan M. Trivedi", "givenName": "Mohan M.", "surname": "Trivedi", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-06-01T00:00:00", "pubType": "proceedings", "pages": "61", "year": "2005", "issn": "1063-6919", "isbn": "0-7695-2660-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "237230060", "articleId": "12OmNyRg4Cj", "__typename": "AdjacentArticleType" }, "next": { "fno": "237230062", "articleId": "12OmNBPtJAt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2007/1016/0/04284826", "title": "Enhancing a Driver's Situation Awareness using a Global View Map", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284826/12OmNAH5dlx", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2009/3736/5/3736e274", "title": "Beacon Tracking with an Embedded Omni-vision System", "doi": null, "abstractUrl": "/proceedings-article/icnc/2009/3736e274/12OmNASraRq", "parentPublication": { "id": "proceedings/icnc/2009/3736/5", "title": "2009 Fifth International Conference on Natural Computation", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2014/4308/0/4308a676", "title": "A Surround View Camera Solution for Embedded Systems", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a676/12OmNAZx8Ms", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2008/3095/0/3095a502", "title": "A New Remote Camera Work System for Teleconference Using a Combination of Omni-Directional and Network Controlled Cameras", "doi": null, "abstractUrl": "/proceedings-article/aina/2008/3095a502/12OmNC943Nh", "parentPublication": { "id": "proceedings/aina/2008/3095/0", "title": "22nd International Conference on Advanced Information Networking and Applications (aina 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/case/2007/1153/0/04341852", "title": "Mechanical Design of \"Omni-Ball\": Spherical Wheel for Holonomic Omnidirectional Motion", "doi": null, "abstractUrl": "/proceedings-article/case/2007/04341852/12OmNxA3YRf", "parentPublication": { "id": "proceedings/case/2007/1153/0", "title": "3rd Annual IEEE Conference on Automation Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2016/1437/0/1437a901", "title": "Embedded Computing Framework for Vision-Based Real-Time Surround Threat Analysis and Driver Assistance", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437a901/12OmNzd7bgM", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139591", "title": "Omni-directional 
stereo for making global map", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139591/12OmNzdoN8o", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imis/2013/4974/0/4974a331", "title": "Wireless Networked Omni-directional Video Distribution System Based on Delay Tolerant Network on Disaster Environment", "doi": null, "abstractUrl": "/proceedings-article/imis/2013/4974a331/12OmNzwHvm1", "parentPublication": { "id": "proceedings/imis/2013/4974/0", "title": "2013 Seventh International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2009/02/mmu2009020012", "title": "The Sonic Nomadic: Exploring Mobile Surround-Sound Interactions", "doi": null, "abstractUrl": "/magazine/mu/2009/02/mmu2009020012/13rRUxBJhjU", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b486", "title": "Revisit Surround-view Camera System Calibration", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b486/1cdOFk0IJ7W", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx7ouUK", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001", "acronym": "cvpr", "groupId": "1000147", "volume": "1", "displayVolume": "2", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNxdVh1C", "doi": "10.1109/CVPR.2001.990542", "title": "Precise Omnidirectional Camera Calibration", "normalizedTitle": "Precise Omnidirectional Camera Calibration", "abstract": "Recent omnidirectional camera designs aim a conventional camera at a mirror that expands the camera's field of view. This wide view is ideal for three-dimensional vision tasks such as motion estimation and obstacle detection, but these applications require an accurate model of the imaging process. We present a full model of the imaging process, which includes the rotation and translation between the camera and mirror, and an algorithm that determines this relative position from observations of known points in a single image. We present tests of the model and of the calibration procedure for various amounts of misalignment between the mirror and camera. These tests show that the algorithm recovers the correct relative position, and that by using the full model, accurate shape-from-motion and stereo matching are possible even if the camera and mirror are severely misaligned.", "abstracts": [ { "abstractType": "Regular", "content": "Recent omnidirectional camera designs aim a conventional camera at a mirror that expands the camera's field of view. This wide view is ideal for three-dimensional vision tasks such as motion estimation and obstacle detection, but these applications require an accurate model of the imaging process. We present a full model of the imaging process, which includes the rotation and translation between the camera and mirror, and an algorithm that determines this relative position from observations of known points in a single image. 
We present tests of the model and of the calibration procedure for various amounts of misalignment between the mirror and camera. These tests show that the algorithm recovers the correct relative position, and that by using the full model, accurate shape-from-motion and stereo matching are possible even if the camera and mirror are severely misaligned.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent omnidirectional camera designs aim a conventional camera at a mirror that expands the camera's field of view. This wide view is ideal for three-dimensional vision tasks such as motion estimation and obstacle detection, but these applications require an accurate model of the imaging process. We present a full model of the imaging process, which includes the rotation and translation between the camera and mirror, and an algorithm that determines this relative position from observations of known points in a single image. We present tests of the model and of the calibration procedure for various amounts of misalignment between the mirror and camera. 
These tests show that the algorithm recovers the correct relative position, and that by using the full model, accurate shape-from-motion and stereo matching are possible even if the camera and mirror are severely misaligned.", "fno": "127210689", "keywords": [], "authors": [ { "affiliation": "Carnegie Mellon University", "fullName": "Dennis Strelow", "givenName": "Dennis", "surname": "Strelow", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University", "fullName": "Jeffrey Mishler", "givenName": "Jeffrey", "surname": "Mishler", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University", "fullName": "David Koes", "givenName": "David", "surname": "Koes", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University", "fullName": "Sanjiv Singh", "givenName": "Sanjiv", "surname": "Singh", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-12-01T00:00:00", "pubType": "proceedings", "pages": "689", "year": "2001", "issn": "1063-6919", "isbn": "0-7695-1272-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "127210682", "articleId": "12OmNxFaLvu", "__typename": "AdjacentArticleType" }, "next": { "fno": "127210695", "articleId": "12OmNwlZu4x", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rvsp/2011/4581/0/4581a199", "title": "Mirror Based IMU-camera and Internal Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/rvsp/2011/4581a199/12OmNANTAz0", "parentPublication": { "id": "proceedings/rvsp/2011/4581/0", "title": "International Conference on Robot, Vision and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2009/3651/0/3651a148", "title": "It's 
All Done with Mirrors: Calibration-and-Correspondence-Free 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/crv/2009/3651a148/12OmNC943Ga", "parentPublication": { "id": "proceedings/crv/2009/3651/0", "title": "2009 Canadian Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvvrhc/1998/8283/0/82830020", "title": "Omnidirectional Sensing and Combined Multiple Sensing", "doi": null, "abstractUrl": "/proceedings-article/cvvrhc/1998/82830020/12OmNrMHOpH", "parentPublication": { "id": "proceedings/cvvrhc/1998/8283/0", "title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840c368", "title": "Extrinsic Camera Calibration without a Direct View Using Spherical Mirror", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840c368/12OmNrkT7B5", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459330", "title": "Display-camera calibration from eye reflections", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459330/12OmNsd6vhv", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2005/2467/0/24670026", "title": "Auto-Calibration of a Compound-Type Omnidirectional Camera", "doi": null, "abstractUrl": "/proceedings-article/dicta/2005/24670026/12OmNvDZF0Q", "parentPublication": { "id": "proceedings/dicta/2005/2467/0", "title": "Digital Image Computing: Techniques and Applications (DICTA'05)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/4/252140861", "title": "An Omnidirectional Stereo Vision System Using a Single Camera", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252140861/12OmNwdtw6D", "parentPublication": { "id": "proceedings/icpr/2006/2521/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2009/3651/0/3651a155", "title": "Screen-Camera Calibration Using Gray Codes", "doi": null, "abstractUrl": "/proceedings-article/crv/2009/3651a155/12OmNxWLTjF", "parentPublication": { "id": "proceedings/crv/2009/3651/0", "title": "2009 Canadian Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1997/7822/0/78220482", "title": "Catadioptric Omnidirectional Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1997/78220482/12OmNz2C1ze", "parentPublication": { "id": "proceedings/cvpr/1997/7822/0", "title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/06977410", "title": "Camera Calibration for Plate Refractive Imaging System", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/06977410/12OmNzC5T4s", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy4r3R2", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNyqzM2C", "doi": "10.1109/CVPR.2009.5206530", "title": "Stereographic rectification of omnidirectional stereo pairs", "normalizedTitle": "Stereographic rectification of omnidirectional stereo pairs", "abstract": "We present a general technique for rectification of a stereo pair acquired by a calibrated omnidirectional camera. Using this technique we formulate a new stereographic rectification method. Our rectification does not map epipolar curves onto lines as common rectification methods, but rather maps epipolar curves onto circles. We show that this rectification in a certain sense minimizes the distortion of the original omnidirectional images. We formulate the rectification for multiple images and show that the choice of the optimal projection center of the rectification is under certain circumstances equivalent to the classical problem of spherical minimax location. We demonstrate the behaviour and the quality of the rectification in real experiments with images from 180 degree field of view fish eye lenses.", "abstracts": [ { "abstractType": "Regular", "content": "We present a general technique for rectification of a stereo pair acquired by a calibrated omnidirectional camera. Using this technique we formulate a new stereographic rectification method. Our rectification does not map epipolar curves onto lines as common rectification methods, but rather maps epipolar curves onto circles. We show that this rectification in a certain sense minimizes the distortion of the original omnidirectional images. 
We formulate the rectification for multiple images and show that the choice of the optimal projection center of the rectification is under certain circumstances equivalent to the classical problem of spherical minimax location. We demonstrate the behaviour and the quality of the rectification in real experiments with images from 180 degree field of view fish eye lenses.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a general technique for rectification of a stereo pair acquired by a calibrated omnidirectional camera. Using this technique we formulate a new stereographic rectification method. Our rectification does not map epipolar curves onto lines as common rectification methods, but rather maps epipolar curves onto circles. We show that this rectification in a certain sense minimizes the distortion of the original omnidirectional images. We formulate the rectification for multiple images and show that the choice of the optimal projection center of the rectification is under certain circumstances equivalent to the classical problem of spherical minimax location. We demonstrate the behaviour and the quality of the rectification in real experiments with images from 180 degree field of view fish eye lenses.", "fno": "05206530", "keywords": [ "Cameras", "Photographic Lenses", "Stereo Image Processing", "Stereographic Rectification", "Omnidirectional Stereo Pairs", "Omnidirectional Camera", "Epipolar Curves", "Optimal Projection Center", "Fish Eye Lenses", "Epipolar Alignment", "Digital Cameras", "Geometry", "Geometrical Optics", "Calibration", "Cybernetics", "Minimax Techniques", "Marine Animals", "Lenses", "Minimization Methods", "Image Segmentation" ], "authors": [ { "affiliation": "Center for Machine Perception, Department of Cybernetics, Faculty of Elec. 
Eng., Czech Technical University in Prague, Czech Republic", "fullName": "Jan Heller", "givenName": "Jan", "surname": "Heller", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Machine Perception, Department of Cybernetics, Faculty of Elec. Eng., Czech Technical University in Prague, Czech Republic", "fullName": "Tomas Pajdla", "givenName": "Tomas", "surname": "Pajdla", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-06-01T00:00:00", "pubType": "proceedings", "pages": "1414-1421", "year": "2009", "issn": "1063-6919", "isbn": "978-1-4244-3992-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05206529", "articleId": "12OmNxcvh4x", "__typename": "AdjacentArticleType" }, "next": { "fno": "05206531", "articleId": "12OmNAIMO5s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130256", "title": "3D environment measurement using binocular stereo and motion stereo by mobile robot with omnidirectional stereo camera", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130256/12OmNBl6EHn", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2003/2105/7/210570073", "title": "Conformal Rectification of Omnidirectional Stereo Pairs", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2003/210570073/12OmNBpVPTj", "parentPublication": { "id": "proceedings/cvprw/2003/2105/7", "title": "2003 Conference on Computer Vision and Pattern Recognition Workshop - Volume 7", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/avss/2007/1695/0/04425344", "title": "Dense disparity estimation from omnidirectional images", "doi": null, "abstractUrl": "/proceedings-article/avss/2007/04425344/12OmNCwladA", "parentPublication": { "id": "proceedings/avss/2007/1695/0", "title": "2007 IEEE Conference on Advanced Video and Signal Based Surveillance, AVSS 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336c903", "title": "A Linear and Aspect Ratio Invariant Rectification Method for Stereo Vision", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336c903/12OmNrMZpu5", "parentPublication": { "id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2010/3962/1/3962a322", "title": "A Robust Epipolar Rectification Method of Stereo Pairs", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2010/3962a322/12OmNvA1hvB", "parentPublication": { "id": "proceedings/icmtma/2010/3962/1", "title": "2010 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851c773", "title": "From Bows to Arrows: Rolling Shutter Rectification of Urban Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851c773/12OmNvD8RE9", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1997/7822/0/78220393", "title": "Cylindrical rectification to minimize epipolar distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1997/78220393/12OmNyvoX7R", "parentPublication": { "id": 
"proceedings/cvpr/1997/7822/0", "title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2008/3381/0/3381a150", "title": "View Synthesis for Virtual Walk through in Real Scene Based on Catadioptric Omnidirectional Images", "doi": null, "abstractUrl": "/proceedings-article/cw/2008/3381a150/12OmNz61dIC", "parentPublication": { "id": "proceedings/cw/2008/3381/0", "title": "2008 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2962", "title": "Uniform Subdivision of Omnidirectional Camera Space for Efficient Spherical Stereo Matching", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2962/1H1iRK3bg5O", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/11/09086445", "title": "End-to-End Learning for Omnidirectional Stereo Matching With Uncertainty Prior", "doi": null, "abstractUrl": "/journal/tp/2021/11/09086445/1jAciRM8xws", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1iRK3bg5O", "doi": "10.1109/CVPR52688.2022.01263", "title": "Uniform Subdivision of Omnidirectional Camera Space for Efficient Spherical Stereo Matching", "normalizedTitle": "Uniform Subdivision of Omnidirectional Camera Space for Efficient Spherical Stereo Matching", "abstract": "Omnidirectional cameras have been used widely to better understand surrounding environments. They are often configured as stereo to estimate depth. However, due to the optics of the fish eye lens, conventional epipolar geometry is inapplicable directly to omnidirectional camera images. Intermediate formats of omnidirectional images, such as equirect-angular images, have been used. However, stereo matching performance on these image formats has been lower than the conventional stereo due to severe image distortion near pole regions. In this paper, to address the distortion problem of omnidirectional images, we devise a novel subdivision scheme of a spherical geodesic grid. This enables more isotropic patch sampling of spherical image information in the omnidirectional camera space. By extending the existing equalarc scheme, our spherical geodesic grid is tessellated with an equalepiline subdivision scheme, making the cell sizes and in-between distances as uniform as possible, i.e., the arc length of the spherical grid cell's edges is well regularized. Also, our uniformly tessellated coordinates in a 2D image can be transformed into spherical coordinates via one-to-one mapping, allowing for analytical forward/backward transformation. 
Our uniform tessellation scheme achieves a higher accuracy of stereo matching than the traditional cylindrical and cubemap-based approaches, reducing the memory footage required for stereo matching by 20%.", "abstracts": [ { "abstractType": "Regular", "content": "Omnidirectional cameras have been used widely to better understand surrounding environments. They are often configured as stereo to estimate depth. However, due to the optics of the fish eye lens, conventional epipolar geometry is inapplicable directly to omnidirectional camera images. Intermediate formats of omnidirectional images, such as equirect-angular images, have been used. However, stereo matching performance on these image formats has been lower than the conventional stereo due to severe image distortion near pole regions. In this paper, to address the distortion problem of omnidirectional images, we devise a novel subdivision scheme of a spherical geodesic grid. This enables more isotropic patch sampling of spherical image information in the omnidirectional camera space. By extending the existing equalarc scheme, our spherical geodesic grid is tessellated with an equalepiline subdivision scheme, making the cell sizes and in-between distances as uniform as possible, i.e., the arc length of the spherical grid cell's edges is well regularized. Also, our uniformly tessellated coordinates in a 2D image can be transformed into spherical coordinates via one-to-one mapping, allowing for analytical forward/backward transformation. Our uniform tessellation scheme achieves a higher accuracy of stereo matching than the traditional cylindrical and cubemap-based approaches, reducing the memory footage required for stereo matching by 20%.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Omnidirectional cameras have been used widely to better understand surrounding environments. They are often configured as stereo to estimate depth. 
However, due to the optics of the fish eye lens, conventional epipolar geometry is inapplicable directly to omnidirectional camera images. Intermediate formats of omnidirectional images, such as equirect-angular images, have been used. However, stereo matching performance on these image formats has been lower than the conventional stereo due to severe image distortion near pole regions. In this paper, to address the distortion problem of omnidirectional images, we devise a novel subdivision scheme of a spherical geodesic grid. This enables more isotropic patch sampling of spherical image information in the omnidirectional camera space. By extending the existing equalarc scheme, our spherical geodesic grid is tessellated with an equalepiline subdivision scheme, making the cell sizes and in-between distances as uniform as possible, i.e., the arc length of the spherical grid cell's edges is well regularized. Also, our uniformly tessellated coordinates in a 2D image can be transformed into spherical coordinates via one-to-one mapping, allowing for analytical forward/backward transformation. 
Our uniform tessellation scheme achieves a higher accuracy of stereo matching than the traditional cylindrical and cubemap-based approaches, reducing the memory footage required for stereo matching by 20%.", "fno": "694600m2962", "keywords": [ "Cameras", "Computational Geometry", "Image Matching", "Image Sensors", "Stereo Image Processing", "Image Formats", "Conventional Stereo", "Severe Image Distortion", "Omnidirectional Images", "Novel Subdivision Scheme", "Spherical Geodesic Grid", "Spherical Image Information", "Omnidirectional Camera Space", "Existing Equalarc Scheme", "Equalepiline Subdivision Scheme", "Spherical Grid Cell", "Uniformly Tessellated Coordinates", "Spherical Coordinates", "Uniform Tessellation Scheme", "Uniform Subdivision", "Efficient Spherical Stereo", "Omnidirectional Cameras", "Fish Eye Lens", "Conventional Epipolar Geometry", "Omnidirectional Camera Images", "Intermediate Formats", "Equirect Angular Images", "Stereo Matching Performance", "Geometry", "Image Resolution", "Image Edge Detection", "Memory Management", "Optical Distortion", "Cameras", "Distortion" ], "authors": [ { "affiliation": "KAIST", "fullName": "Donghun Kang", "givenName": "Donghun", "surname": "Kang", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST", "fullName": "Hyeonjoong Jang", "givenName": "Hyeonjoong", "surname": "Jang", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST", "fullName": "Jungeon Lee", "givenName": "Jungeon", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST", "fullName": "Chong-Min Kyung", "givenName": "Chong-Min", "surname": "Kyung", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST", "fullName": "Min H. 
Kim", "givenName": "Min H.", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "12962-12970", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1iRGFHi6c", "name": "pcvpr202269460-09878512s1-mm_694600m2962.zip", "size": "4.24 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878512s1-mm_694600m2962.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600m2952", "articleId": "1H0LlFQ1RdK", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600m2971", "articleId": "1H1kLFunhJe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2009/4442/0/05457429", "title": "Environment modelling using spherical stereo imaging", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457429/12OmNBuL14N", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04760988", "title": "Super-resolution from unregistered omnidirectional images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04760988/12OmNqHqSvm", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/3/252131046", "title": "Real-Time Spherical Stereo", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252131046/12OmNqOwQFC", "parentPublication": { "id": 
"proceedings/icpr/2006/2521/3", "title": "2006 18th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206530", "title": "Stereographic rectification of omnidirectional stereo pairs", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206530/12OmNyqzM2C", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486584", "title": "Spherical Structural Similarity Index for Objective Omnidirectional Video Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486584/14jQfPogtSp", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545697", "title": "Foreground Enlargement of Omnidirectional Images by Spherical Trigonometry", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545697/17D45X7VTgq", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956197", "title": "On Depth Error from Spherical Camera Calibration within Omnidirectional Stereo Vision", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956197/1IHqnsyNjbO", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2019/3263/0/08747326", "title": 
"Episcan360: Active Epipolar Imaging for Live Omni-directional Stereo", "doi": null, "abstractUrl": "/proceedings-article/iccp/2019/08747326/1bcJxL9EdLW", "parentPublication": { "id": "proceedings/iccp/2019/3263/0", "title": "2019 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800m2423", "title": "Tangent Images for Mitigating Spherical Distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800m2423/1m3o7SSxlT2", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1418", "title": "Real-Time Sphere Sweeping Stereo from Multiview Fisheye Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1418/1yeHJLiupiM", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1IHotVZum6Q", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "9956007", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1IHqnsyNjbO", "doi": "10.1109/ICPR56361.2022.9956197", "title": "On Depth Error from Spherical Camera Calibration within Omnidirectional Stereo Vision", "normalizedTitle": "On Depth Error from Spherical Camera Calibration within Omnidirectional Stereo Vision", "abstract": "As a depth sensing approach, whilst stereo vision provides a good compromise between accuracy and cost, a key limitation is the limited field of view of the conventional cameras that are used within most stereo configurations. By contrast, the use of spherical cameras within a stereo configuration offers omnidirectional stereo sensing. However, despite the presence of significant image distortion in spherical camera images, only very limited attempts have been made to study and quantify omnidirectional stereo depth accuracy. In this paper we construct such an omnidirectional stereo system that is capable of real-time 360° disparity map reconstruction as the basis for such a study. We first investigate the accuracy of using a standard spherical camera model for calibration combined with a longitude-latitude projection for omnidirectional stereo, and show that the depth error increases significantly as the angle from the camera optical axis approaches the limits of the camera field of view. In contrast, we then consider an alternative calibration approach via the use of perspective undistortion with a conventional pinhole camera model allowing omnidirectional cameras to be mapped to a conventional rectilinear stereo formulation. 
We find that conversely this proposed approach exhibits improved depth accuracy at large angles from the camera optical axis when compared to omnidirectional stereo depth based on a spherical camera model calibration.", "abstracts": [ { "abstractType": "Regular", "content": "As a depth sensing approach, whilst stereo vision provides a good compromise between accuracy and cost, a key limitation is the limited field of view of the conventional cameras that are used within most stereo configurations. By contrast, the use of spherical cameras within a stereo configuration offers omnidirectional stereo sensing. However, despite the presence of significant image distortion in spherical camera images, only very limited attempts have been made to study and quantify omnidirectional stereo depth accuracy.In this paper we construct such an omnidirectional stereo system that is capable of real-time 360° disparity map reconstruction as the basis for such a study. We first investigate the accuracy of using a standard spherical camera model for calibration combined with a longitude-latitude projection for omnidirectional stereo, and show that the depth error increases significantly as the angle from the camera optical axis approaches the limits of the camera field of view.In contrast, we then consider an alternative calibration approach via the use of perspective undistortion with a conventional pinhole camera model allowing omnidirectional cameras to be mapped to a conventional rectilinear stereo formulation. 
We find that conversely this proposed approach exhibits improved depth accuracy at large angles from the camera optical axis when compared to omnidirectional stereo depth based on a spherical camera model calibration.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As a depth sensing approach, whilst stereo vision provides a good compromise between accuracy and cost, a key limitation is the limited field of view of the conventional cameras that are used within most stereo configurations. By contrast, the use of spherical cameras within a stereo configuration offers omnidirectional stereo sensing. However, despite the presence of significant image distortion in spherical camera images, only very limited attempts have been made to study and quantify omnidirectional stereo depth accuracy.In this paper we construct such an omnidirectional stereo system that is capable of real-time 360° disparity map reconstruction as the basis for such a study. We first investigate the accuracy of using a standard spherical camera model for calibration combined with a longitude-latitude projection for omnidirectional stereo, and show that the depth error increases significantly as the angle from the camera optical axis approaches the limits of the camera field of view.In contrast, we then consider an alternative calibration approach via the use of perspective undistortion with a conventional pinhole camera model allowing omnidirectional cameras to be mapped to a conventional rectilinear stereo formulation. 
We find that conversely this proposed approach exhibits improved depth accuracy at large angles from the camera optical axis when compared to omnidirectional stereo depth based on a spherical camera model calibration.", "fno": "09956197", "keywords": [ "Calibration", "Cameras", "Image Reconstruction", "Image Sensors", "Stereo Image Processing", "Alternative Calibration Approach", "Camera Optical Axis", "Conventional Cameras", "Conventional Pinhole Camera Model", "Conventional Rectilinear Stereo Formulation", "Depth Error", "Depth Sensing Approach", "Key Limitation", "Map Reconstruction", "Omnidirectional Cameras", "Omnidirectional Stereo Depth Accuracy", "Omnidirectional Stereo Sensing", "Omnidirectional Stereo System", "Omnidirectional Stereo Vision", "Significant Image Distortion", "Spherical Camera Calibration", "Spherical Camera Images", "Spherical Camera Model Calibration", "Spherical Cameras", "Standard Spherical Camera Model", "Stereo Configuration", "Integrated Optics", "Optical Distortion", "Cameras", "Optical Imaging", "Adaptive Optics", "Real Time Systems", "Calibration" ], "authors": [ { "affiliation": "Durham University,Department of Engineering,Durham,UK", "fullName": "Michael Groom", "givenName": "Michael", "surname": "Groom", "__typename": "ArticleAuthorType" }, { "affiliation": "Durham University,Department of Engineering,Durham,UK", "fullName": "Toby P. 
Breckon", "givenName": "Toby P.", "surname": "Breckon", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "3987-3993", "year": "2022", "issn": null, "isbn": "978-1-6654-9062-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09956365", "articleId": "1IHp3Rccu2c", "__typename": "AdjacentArticleType" }, "next": { "fno": "09956132", "articleId": "1IHq391uoAE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130256", "title": "3D environment measurement using binocular stereo and motion stereo by mobile robot with omnidirectional stereo camera", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130256/12OmNBl6EHn", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457429", "title": "Environment modelling using spherical stereo imaging", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457429/12OmNBuL14N", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2005/2467/0/24670026", "title": "Auto-Calibration of a Compound-Type Omnidirectional Camera", "doi": null, "abstractUrl": "/proceedings-article/dicta/2005/24670026/12OmNvDZF0Q", "parentPublication": { "id": "proceedings/dicta/2005/2467/0", "title": "Digital Image Computing: Techniques and Applications (DICTA'05)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/4/252140861", "title": "An Omnidirectional Stereo Vision System Using a Single Camera", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252140861/12OmNwdtw6D", "parentPublication": { "id": "proceedings/icpr/2006/2521/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477646", "title": "Geometric calibration for mobile, stereo, autofocus cameras", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477646/12OmNyO8tKJ", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206530", "title": "Stereographic rectification of omnidirectional stereo pairs", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206530/12OmNyqzM2C", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486584", "title": "Spherical Structural Similarity Index for Objective Omnidirectional Video Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486584/14jQfPogtSp", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2962", "title": "Uniform Subdivision of Omnidirectional Camera Space for Efficient Spherical Stereo Matching", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2022/694600m2962/1H1iRK3bg5O", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i986", "title": "OmniMVS: End-to-End Learning for Omnidirectional Stereo Matching", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i986/1hQqlZvLEu4", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/11/09086445", "title": "End-to-End Learning for Omnidirectional Stereo Matching With Uncertainty Prior", "doi": null, "abstractUrl": "/journal/tp/2021/11/09086445/1jAciRM8xws", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxAbDrq7K", "doi": "10.1109/VRW50115.2020.00079", "title": "Eating together while being apart: A pilot study on the effects of mixed-reality conversations and virtual environments on older eaters’ solitary meal experience and food intake", "normalizedTitle": "Eating together while being apart: A pilot study on the effects of mixed-reality conversations and virtual environments on older eaters’ solitary meal experience and food intake", "abstract": "The aim of this study was to investigate the potential of mixed-reality systems to virtually manipulate the eating experience and facilitate increased food intake among older participants. Social isolation is often associated with undernourishment among older adults receiving care services. Mixed-reality systems that blend real elements and a virtual world can conveniently allow older adults to eat a meal in their home while experiencing having a conversation with friends through virtual avatars in a virtual environment. A within-subjects study on thirty older participants investigated whether the mixed-reality illusion of eating in a living room with and without familiar others contributed positively to the meal experience and increased energy intake. The results did not display any significant changes in energy intake but highlighted that the virtual living room had a more energetic and pleasant atmosphere and that meals eaten in the virtual room were perceived to be of a higher quality compared to meals eaten in the real lab environment. Eating while engaging in avatar-based social interactions with three remotely located friends resulted in lower sensations of being alone and positive mood changes. 
A discussion of the reasons for the absence of increases in energy intake is included.", "abstracts": [ { "abstractType": "Regular", "content": "The aim of this study was to investigate the potential of mixed-reality systems to virtually manipulate the eating experience and facilitate increased food intake among older participants. Social isolation is often associated with undernourishment among older adults receiving care services. Mixed-reality systems that blend real elements and a virtual world can conveniently allow older adults to eat a meal in their home while experiencing having a conversation with friends through virtual avatars in a virtual environment. A within-subjects study on thirty older participants investigated whether the mixed-reality illusion of eating in a living room with and without familiar others contributed positively to the meal experience and increased energy intake. The results did not display any significant changes in energy intake but highlighted that the virtual living room had a more energetic and pleasant atmosphere and that meals eaten in the virtual room were perceived to be of a higher quality compared to meals eaten in the real lab environment. Eating while engaging in avatar-based social interactions with three remotely located friends resulted in lower sensations of being alone and positive mood changes. A discussion of the reasons for the absence of increases in energy intake is included.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The aim of this study was to investigate the potential of mixed-reality systems to virtually manipulate the eating experience and facilitate increased food intake among older participants. Social isolation is often associated with undernourishment among older adults receiving care services. 
Mixed-reality systems that blend real elements and a virtual world can conveniently allow older adults to eat a meal in their home while experiencing having a conversation with friends through virtual avatars in a virtual environment. A within-subjects study on thirty older participants investigated whether the mixed-reality illusion of eating in a living room with and without familiar others contributed positively to the meal experience and increased energy intake. The results did not display any significant changes in energy intake but highlighted that the virtual living room had a more energetic and pleasant atmosphere and that meals eaten in the virtual room were perceived to be of a higher quality compared to meals eaten in the real lab environment. Eating while engaging in avatar-based social interactions with three remotely located friends resulted in lower sensations of being alone and positive mood changes. A discussion of the reasons for the absence of increases in energy intake is included.", "fno": "09090427", "keywords": [ "Resists", "Avatars", "Atmospheric Measurements", "Mood", "Atmospheric Modeling", "Information Systems", "Information Systems Applications", "Collaborative And Social Computing Systems And Tools", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms Mixed Augmented Reality" ], "authors": [ { "affiliation": "Architecture, Design and Media Technology at Aalborg University", "fullName": "Dannie Korsgaard", "givenName": "Dannie", "surname": "Korsgaard", "__typename": "ArticleAuthorType" }, { "affiliation": "Architecture, Design and Media Technology at Aalborg University", "fullName": "Thomas Bjørner", "givenName": "Thomas", "surname": "Bjørner", "__typename": "ArticleAuthorType" }, { "affiliation": "Architecture, Design and Media Technology at Aalborg University", "fullName": "Jon R. 
Bruun-Pedersen", "givenName": "Jon R.", "surname": "Bruun-Pedersen", "__typename": "ArticleAuthorType" }, { "affiliation": "Educational Anthropology at Aarhus University", "fullName": "Pernille K. Sørensen", "givenName": "Pernille K.", "surname": "Sørensen", "__typename": "ArticleAuthorType" }, { "affiliation": "Food Science at Copenhagen University", "fullName": "Federico J. A. Perez-Cueto", "givenName": "Federico J. A.", "surname": "Perez-Cueto", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "365-370", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090439", "articleId": "1jIxi9uE4H6", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090474", "articleId": "1jIxkHvxy1O", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icalt/2015/7334/0/7334a428", "title": "Effects of Somatosensory Video Games on Simple Reactions of Institutional-Dwelling Older Adults with Mild-Cognitive Impairments", "doi": null, "abstractUrl": "/proceedings-article/icalt/2015/7334a428/12OmNB8Cj2K", "parentPublication": { "id": "proceedings/icalt/2015/7334/0", "title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2016/0943/0/0943a248", "title": "Recognizing Eating Gestures Using Context Dependent Hidden Markov Models", "doi": null, "abstractUrl": "/proceedings-article/chase/2016/0943a248/12OmNqzcvLm", "parentPublication": { "id": "proceedings/chase/2016/0943/0", "title": "2016 IEEE First International Conference on Connected Health: Applications, Systems and 
Engineering Technologies (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2017/3881/0/07957709", "title": "Immersive eating: evaluating the use of head-mounted displays for mixed reality meal sessions", "doi": null, "abstractUrl": "/proceedings-article/wevr/2017/07957709/12OmNwK7o9G", "parentPublication": { "id": "proceedings/wevr/2017/3881/0", "title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890718", "title": "Real-time eating action recognition system on a smartphone", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890718/12OmNwlHSZp", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2016/6117/0/6117a216", "title": "Going Outside While Staying Inside — Exercise Motivation with Immersive vs. 
Non–immersive Recreational Virtual Environment Augmentation for Older Adult Nursing Home Residents", "doi": null, "abstractUrl": "/proceedings-article/ichi/2016/6117a216/12OmNxwncza", "parentPublication": { "id": "proceedings/ichi/2016/6117/0", "title": "2016 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percomw/2018/3227/0/08480404", "title": "Cordon Gris: Integrated solution for meal recommendations", "doi": null, "abstractUrl": "/proceedings-article/percomw/2018/08480404/17D45VTRoq2", "parentPublication": { "id": "proceedings/percomw/2018/3227/0", "title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2017/4338/0/07917597", "title": "Investigating barriers and facilitators to wearable adherence in fine-grained eating detection", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2017/07917597/19wAG19CJiM", "parentPublication": { "id": "proceedings/percom-workshops/2017/4338/0", "title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2022/6845/0/684500a330", "title": "Practicality of Automatic Monitoring Sufficient Fluid Intake for Older People", "doi": null, "abstractUrl": "/proceedings-article/ichi/2022/684500a330/1GvdD7RZnAA", "parentPublication": { "id": "proceedings/ichi/2022/6845/0", "title": "2022 IEEE 10th International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956550", "title": "A New Video Dataset for Recognizing Intake Gestures in a Cafeteria Setting", 
"doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956550/1IHoYn2vtSM", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vhcie/2017/2758/0/07935624", "title": "Evaluating collision avoidance effects on discomfort in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vhcie/2017/07935624/1h0Lhmayehq", "parentPublication": { "id": "proceedings/vhcie/2017/2758/0", "title": "2017 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrIaeeX", "title": "2017 IEEE 23rd International Conference on Parallel and Distributed Systems (ICPADS)", "acronym": "icpads", "groupId": "1000534", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNs5rkPT", "doi": "10.1109/ICPADS.2017.00027", "title": "SoundWrite II: Ambient Acoustic Sensing for Noise Tolerant Device-Free Gesture Recognition", "normalizedTitle": "SoundWrite II: Ambient Acoustic Sensing for Noise Tolerant Device-Free Gesture Recognition", "abstract": "Acoustic sensing has brought forth the advances of prosperous applications such as gesture recognition. Specifically, ambient acoustic sensing has drawn many contentions due to the ease of use property. Unfortunately, the inherent ambient noise is the major reason for unstable gesture recognition. In this work, we propose “SoundWrite II”, which is an improved version of our previously designed system. Compared with our previous design, we utilize the two threshold values to identify the effective signals from the original noisy input, and leverage the MFCC (Mel frequency cepstral coefficient) to extract the stable features from different gestures. These enhancements could effectively improve the noise tolerant performance for previous design. Implementation on the Android system has realized the real time processing of the feature extraction and gesture recognition. Extensive evaluations have validated our design, where the noise tolerant property is fully tested under different experimental settings and the recognition accuracy could be 91% with 7 typical gestures.", "abstracts": [ { "abstractType": "Regular", "content": "Acoustic sensing has brought forth the advances of prosperous applications such as gesture recognition. Specifically, ambient acoustic sensing has drawn many contentions due to the ease of use property. 
Unfortunately, the inherent ambient noise is the major reason for unstable gesture recognition. In this work, we propose “SoundWrite II”, which is an improved version of our previously designed system. Compared with our previous design, we utilize the two threshold values to identify the effective signals from the original noisy input, and leverage the MFCC (Mel frequency cepstral coefficient) to extract the stable features from different gestures. These enhancements could effectively improve the noise tolerant performance for previous design. Implementation on the Android system has realized the real time processing of the feature extraction and gesture recognition. Extensive evaluations have validated our design, where the noise tolerant property is fully tested under different experimental settings and the recognition accuracy could be 91% with 7 typical gestures.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Acoustic sensing has brought forth the advances of prosperous applications such as gesture recognition. Specifically, ambient acoustic sensing has drawn many contentions due to the ease of use property. Unfortunately, the inherent ambient noise is the major reason for unstable gesture recognition. In this work, we propose “SoundWrite II”, which is an improved version of our previously designed system. Compared with our previous design, we utilize the two threshold values to identify the effective signals from the original noisy input, and leverage the MFCC (Mel frequency cepstral coefficient) to extract the stable features from different gestures. These enhancements could effectively improve the noise tolerant performance for previous design. Implementation on the Android system has realized the real time processing of the feature extraction and gesture recognition. 
Extensive evaluations have validated our design, where the noise tolerant property is fully tested under different experimental settings and the recognition accuracy could be 91% with 7 typical gestures.", "fno": "212901a121", "keywords": [ "Acoustic Signal Processing", "Cepstral Analysis", "Feature Extraction", "Gesture Recognition", "Sound Write II", "Ambient Acoustic Sensing", "Noise Tolerant Device Free Gesture Recognition", "Inherent Ambient Noise", "Unstable Gesture Recognition", "Mel Frequency Cepstral Coefficient", "Noise Tolerant Performance", "Noise Tolerant Property", "Android System", "Feature Extraction", "Mel Frequency Cepstral Coefficient", "Gesture Recognition", "Sensors", "Microphones", "Smart Phones", "Acoustic Sensing", "Gesture Recognition", "MFCC", "Device Free", "Android Implementation" ], "authors": [ { "affiliation": null, "fullName": "Gan Luo", "givenName": "Gan", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mingshi Chen", "givenName": "Mingshi", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ping Li", "givenName": "Ping", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Maotian Zhang", "givenName": "Maotian", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Panlong Yang", "givenName": "Panlong", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpads", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-12-01T00:00:00", "pubType": "proceedings", "pages": "121-126", "year": "2017", "issn": "1521-9097", "isbn": "978-1-5386-2129-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "212901a113", "articleId": "12OmNAq3hKR", "__typename": "AdjacentArticleType" }, "next": { "fno": "212901a127", "articleId": "12OmNs0C9IH", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2000/0580/0/05800422", "title": "Exploiting Speech/Gesture Co-occurrence for Improving Continuous Gesture Recognition in Weather Narration", "doi": null, "abstractUrl": "/proceedings-article/fg/2000/05800422/12OmNCwUmBP", "parentPublication": { "id": "proceedings/fg/2000/0580/0", "title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waina/2012/4652/0/4652a007", "title": "Separator Design of Gesture Signals Based on Adaptive Threshold Using Wearable Sensors", "doi": null, "abstractUrl": "/proceedings-article/waina/2012/4652a007/12OmNyUWR90", "parentPublication": { "id": "proceedings/waina/2012/4652/0", "title": "2012 26th International Conference on Advanced Information Networking and Applications Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcse/2009/3570/1/3570a217", "title": "Gesture-Based Chemical Formula Editing System", "doi": null, "abstractUrl": "/proceedings-article/wcse/2009/3570a217/12OmNzEVRY6", "parentPublication": { "id": "proceedings/wcse/2009/3570/1", "title": "2009 WRI World Congress on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2015/01/mpc2015010018", "title": "Engineering Gesture-Based Authentication Systems", "doi": null, "abstractUrl": "/magazine/pc/2015/01/mpc2015010018/13rRUwI5U07", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09774919", "title": "DSW: One-shot Learning Scheme for Device-free Acoustic Gesture Signals", "doi": null, "abstractUrl": 
"/journal/tm/5555/01/09774919/1Dlifok5mbC", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipccc/2018/6808/0/08710858", "title": "iPand: Accurate Gesture Input with Ambient Acoustic Sensing on Hand", "doi": null, "abstractUrl": "/proceedings-article/ipccc/2018/08710858/1axfEr9uTyE", "parentPublication": { "id": "proceedings/ipccc/2018/6808/0", "title": "2018 IEEE 37th International Performance Computing and Communications Conference (IPCCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/05/09229520", "title": "Push the Limit of Acoustic Gesture Recognition", "doi": null, "abstractUrl": "/journal/tm/2022/05/09229520/1o3njwLFEqI", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/07/09253526", "title": "UltraGesture: Fine-Grained Gesture Sensing and Recognition", "doi": null, "abstractUrl": "/journal/tm/2022/07/09253526/1oDXB816aGc", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-cbdcom-cyberscitech/2020/6609/0/660900a420", "title": "Recognizing Human Gestures Using Ambient Light", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2020/660900a420/1oFGTbjz4ac", "parentPublication": { "id": "proceedings/dasc-picom-cbdcom-cyberscitech/2020/6609/0", "title": "2020 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/bigcom/2021/4252/0/425200a098", "title": "SignID: Acoustic-based Identification with Single Sign Gesture", "doi": null, "abstractUrl": "/proceedings-article/bigcom/2021/425200a098/1xlA0Ne4aGc", "parentPublication": { "id": "proceedings/bigcom/2021/4252/0", "title": "2021 7th International Conference on Big Data Computing and Communications (BigCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1T91brrO", "doi": "10.1109/ISMAR-Adjunct.2018.00045", "title": "Egocentric Gesture Recognition for Head-Mounted AR Devices", "normalizedTitle": "Egocentric Gesture Recognition for Head-Mounted AR Devices", "abstract": "Natural interaction with virtual objects in AR/VR environments makes for a smooth user experience. Gestures are a natural extension from real world to augmented space to achieve these interactions. Finding discriminating spatio-temporal features relevant to gestures and hands in ego-view is the primary challenge for recognising egocentric gestures. In this work we propose a data driven end-to-end deep learning approach to address the problem of egocentric gesture recognition, which combines an ego-hand encoder network to find ego-hand features, and a recurrent neural network to discern temporally discriminating features. Since deep learning networks are data intensive, we propose a novel data augmentation technique using green screen capture to alleviate the problem of ground truth annotation. In addition we publish a dataset of 10 gestures performed in a natural fashion in front of a green screen for training and the same 10 gestures performed in different natural scenes without green screen for validation. We also present the results of our network's performance in comparison to the state-of-the-art using the AirGest dataset.", "abstracts": [ { "abstractType": "Regular", "content": "Natural interaction with virtual objects in AR/VR environments makes for a smooth user experience. Gestures are a natural extension from real world to augmented space to achieve these interactions. 
Finding discriminating spatio-temporal features relevant to gestures and hands in ego-view is the primary challenge for recognising egocentric gestures. In this work we propose a data driven end-to-end deep learning approach to address the problem of egocentric gesture recognition, which combines an ego-hand encoder network to find ego-hand features, and a recurrent neural network to discern temporally discriminating features. Since deep learning networks are data intensive, we propose a novel data augmentation technique using green screen capture to alleviate the problem of ground truth annotation. In addition we publish a dataset of 10 gestures performed in a natural fashion in front of a green screen for training and the same 10 gestures performed in different natural scenes without green screen for validation. We also present the results of our network's performance in comparison to the state-of-the-art using the AirGest dataset.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Natural interaction with virtual objects in AR/VR environments makes for a smooth user experience. Gestures are a natural extension from real world to augmented space to achieve these interactions. Finding discriminating spatio-temporal features relevant to gestures and hands in ego-view is the primary challenge for recognising egocentric gestures. In this work we propose a data driven end-to-end deep learning approach to address the problem of egocentric gesture recognition, which combines an ego-hand encoder network to find ego-hand features, and a recurrent neural network to discern temporally discriminating features. Since deep learning networks are data intensive, we propose a novel data augmentation technique using green screen capture to alleviate the problem of ground truth annotation. 
In addition we publish a dataset of 10 gestures performed in a natural fashion in front of a green screen for training and the same 10 gestures performed in different natural scenes without green screen for validation. We also present the results of our network's performance in comparison to the state-of-the-art using the AirGest dataset.", "fno": "08699166", "keywords": [ "Augmented Reality", "Gesture Recognition", "Helmet Mounted Displays", "Human Computer Interaction", "Learning Artificial Intelligence", "Recurrent Neural Nets", "User Experience", "Egocentric Gesture Recognition", "Head Mounted AR Devices", "AR VR Environments", "Smooth User Experience", "Ego Hand Encoder Network", "Ego Hand Features", "Recurrent Neural Network", "Deep Learning Networks", "Green Screen Capture", "Deep Learning", "Data Augmentation Technique", "Green Products", "Databases", "Gesture Recognition", "Cameras", "Deep Learning", "Training", "Recurrent Neural Networks", "Egocentric Gesture Recognition X 2014 Deep Learning X 2014 LST Ms X 2014", "Human Computer Interfaces X 2014 Natural Gestures" ], "authors": [ { "affiliation": "Trinity College Dublin, V-SENSE School of Computer Science and Statistics", "fullName": "Tejo Chalasani", "givenName": "Tejo", "surname": "Chalasani", "__typename": "ArticleAuthorType" }, { "affiliation": "Trinity College Dublin, V-SENSE School of Computer Science and Statistics", "fullName": "Jan Ondrej", "givenName": "Jan", "surname": "Ondrej", "__typename": "ArticleAuthorType" }, { "affiliation": "Trinity College Dublin, V-SENSE School of Computer Science and Statistics", "fullName": "Aljosa Smolic", "givenName": "Aljosa", "surname": "Smolic", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "109-114", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699286", "articleId": "19F1VntaVYQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699311", "articleId": "19F1NSFj2sU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ic3/2014/5172/0/06897148", "title": "A wireless dynamic gesture user interface for HCI using hand data glove", "doi": null, "abstractUrl": "/proceedings-article/ic3/2014/06897148/12OmNBCqbzO", "parentPublication": { "id": "proceedings/ic3/2014/5172/0", "title": "2014 Seventh International Conference on Contemporary Computing (IC3)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836511", "title": "GestAR: Real Time Gesture Interaction for AR with Egocentric View", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836511/12OmNBgz4AT", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2000/0580/0/05800422", "title": "Exploiting Speech/Gesture Co-occurrence for Improving Continuous Gesture Recognition in Weather Narration", "doi": null, "abstractUrl": "/proceedings-article/fg/2000/05800422/12OmNCwUmBP", "parentPublication": { "id": "proceedings/fg/2000/0580/0", "title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. 
PR00580)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2012/1204/0/06184206", "title": "Poster: Head gesture 3D interface using a head mounted camera", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184206/12OmNzayNwg", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciis/1999/0446/0/04460328", "title": "Toward Multimodal Interpretation in a Natural Speech/Gesture Interface", "doi": null, "abstractUrl": "/proceedings-article/iciis/1999/04460328/12OmNzuZUnH", "parentPublication": { "id": "proceedings/iciis/1999/0446/0", "title": "Information, Intelligence, and Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/08/ttp2008081330", "title": "Analysis of Head Gesture and Prosody Patterns for Prosody-Driven Head-Gesture Animation", "doi": null, "abstractUrl": "/journal/tp/2008/08/ttp2008081330/13rRUNvPLaM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300e367", "title": "Simultaneous Segmentation and Recognition: Towards More Accurate Ego Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300e367/1i5mDjY9lAs", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150823", "title": "CatNet: Class Incremental 3D ConvNets for Lifelong Egocentric Gesture Recognition", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2020/09150823/1lPHrk4YbQs", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiea/2020/8288/0/828800a062", "title": "Intelligent Classification of Multi-Gesture EMG Signals Based on LSTM", "doi": null, "abstractUrl": "/proceedings-article/aiea/2020/828800a062/1nTuiTDPLjO", "parentPublication": { "id": "proceedings/aiea/2020/8288/0", "title": "2020 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icectt/2020/9928/0/992800a203", "title": "The Conversion of the Production Mode of Film Green Screen Visual Effects in the Setting of 5G Technology", "doi": null, "abstractUrl": "/proceedings-article/icectt/2020/992800a203/1oa5iiXGv1C", "parentPublication": { "id": "proceedings/icectt/2020/9928/0", "title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgCnmbXyM", "doi": "10.1109/VR55154.2023.00026", "title": "Real-Time Recognition of In-Place Body Actions and Head Gestures using Only a Head-Mounted Display", "normalizedTitle": "Real-Time Recognition of In-Place Body Actions and Head Gestures using Only a Head-Mounted Display", "abstract": "Body actions and head gestures are natural interfaces for interaction in virtual environments. Existing methods for in-place body action recognition often require hardware more than a head-mounted display (HMD), making body action interfaces difficult to be introduced to ordinary virtual reality (VR) users as they usually only possess an HMD. In addition, there lacks a unified solution to recognize in-place body actions and head gestures. This potentially hinders the exploration of the use of in-place body actions and head gestures for novel interaction experiences in virtual environments. We present a unified two-stream 1-D convolutional neural network (CNN) for recognition of body actions when a user performs walking-in-place (WIP) and for recognition of head gestures when a user stands still wearing only an HMD. Compared to previous approaches, our method does not require specialized hardware and/or additional tracking devices other than an HMD and can recognize a significantly larger number of body actions and head gestures than other existing methods. In total, ten in-place body actions and eight head gestures can be recognized with the proposed method, which makes this method a readily available body action interface (head gestures included) for interaction with virtual environments. We demonstrate one utility of the interface through a virtual locomotion task. 
Results show that the present body action interface is reliable in detecting body actions for the VR locomotion task but is physically demanding compared to a touch controller interface. The present body action interface is promising for new VR experiences and applications, especially for VR fitness applications where workouts are intended.", "abstracts": [ { "abstractType": "Regular", "content": "Body actions and head gestures are natural interfaces for interaction in virtual environments. Existing methods for in-place body action recognition often require hardware more than a head-mounted display (HMD), making body action interfaces difficult to be introduced to ordinary virtual reality (VR) users as they usually only possess an HMD. In addition, there lacks a unified solution to recognize in-place body actions and head gestures. This potentially hinders the exploration of the use of in-place body actions and head gestures for novel interaction experiences in virtual environments. We present a unified two-stream 1-D convolutional neural network (CNN) for recognition of body actions when a user performs walking-in-place (WIP) and for recognition of head gestures when a user stands still wearing only an HMD. Compared to previous approaches, our method does not require specialized hardware and/or additional tracking devices other than an HMD and can recognize a significantly larger number of body actions and head gestures than other existing methods. In total, ten in-place body actions and eight head gestures can be recognized with the proposed method, which makes this method a readily available body action interface (head gestures included) for interaction with virtual environments. We demonstrate one utility of the interface through a virtual locomotion task. Results show that the present body action interface is reliable in detecting body actions for the VR locomotion task but is physically demanding compared to a touch controller interface. 
The present body action interface is promising for new VR experiences and applications, especially for VR fitness applications where workouts are intended.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Body actions and head gestures are natural interfaces for interaction in virtual environments. Existing methods for in-place body action recognition often require hardware more than a head-mounted display (HMD), making body action interfaces difficult to be introduced to ordinary virtual reality (VR) users as they usually only possess an HMD. In addition, there lacks a unified solution to recognize in-place body actions and head gestures. This potentially hinders the exploration of the use of in-place body actions and head gestures for novel interaction experiences in virtual environments. We present a unified two-stream 1-D convolutional neural network (CNN) for recognition of body actions when a user performs walking-in-place (WIP) and for recognition of head gestures when a user stands still wearing only an HMD. Compared to previous approaches, our method does not require specialized hardware and/or additional tracking devices other than an HMD and can recognize a significantly larger number of body actions and head gestures than other existing methods. In total, ten in-place body actions and eight head gestures can be recognized with the proposed method, which makes this method a readily available body action interface (head gestures included) for interaction with virtual environments. We demonstrate one utility of the interface through a virtual locomotion task. Results show that the present body action interface is reliable in detecting body actions for the VR locomotion task but is physically demanding compared to a touch controller interface. 
The present body action interface is promising for new VR experiences and applications, especially for VR fitness applications where workouts are intended.", "fno": "481500a105", "keywords": [ "Head Mounted Displays", "Three Dimensional Displays", "Virtual Environments", "Resists", "User Interfaces", "Hardware", "Real Time Systems", "Body Action Recognition", "Head Gesture Recognition", "Virtual Locomotion" ], "authors": [ { "affiliation": "College of Information and Electrical Engineering, China Agricultural University", "fullName": "Jingbo Zhao", "givenName": "Jingbo", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Information and Electrical Engineering, China Agricultural University", "fullName": "Mingjun Shao", "givenName": "Mingjun", "surname": "Shao", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Information and Electrical Engineering, China Agricultural University", "fullName": "Yaojun Wang", "givenName": "Yaojun", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Duke University,Department of Electrical and Computer Engineering", "fullName": "Ruolin Xu", "givenName": "Ruolin", "surname": "Xu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "105-114", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgCgLrAfS", "name": "pvr202348150-010108456s1-mm_481500a105.zip", "size": "57.1 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108456s1-mm_481500a105.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "481500a094", "articleId": "1MNgWtYsR5S", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a115", "articleId": "1MNgnMu6Sju", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2009/3791/0/3791a043", "title": "Enhancing Presence in Head-Mounted Display Environments by Visual Body Feedback Using Head-Mounted Cameras", "doi": null, "abstractUrl": "/proceedings-article/cw/2009/3791a043/12OmNxveNRr", "parentPublication": { "id": "proceedings/cw/2009/3791/0", "title": "2009 International Conference on CyberWorlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2012/1204/0/06184206", "title": "Poster: Head gesture 3D interface using a head mounted camera", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184206/12OmNzayNwg", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/07/ttg2011070888", "title": "Natural Perspective Projections for Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a640", "title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09850416", "title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics", "doi": null, "abstractUrl": 
"/journal/tg/5555/01/09850416/1Fz4SPLVTMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a470", "title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a649", "title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a109", "title": "Generative RGB-D Face Completion for Head-Mounted Display Removal", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a109/1tnXncnHsIg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a118", "title": "Exploring Head-based Mode-Switching in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a118/1yeD1RhEseY", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a413", "title": "Selective Foveated Ray Tracing for Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgl22Q3XG", "doi": "10.1109/VR55154.2023.00084", "title": "MoPeDT: A Modular Head-Mounted Display Toolkit to Conduct Peripheral Vision Research", "normalizedTitle": "MoPeDT: A Modular Head-Mounted Display Toolkit to Conduct Peripheral Vision Research", "abstract": "Peripheral vision plays a significant role in human perception and orientation. However, its relevance for human-computer interaction, especially head-mounted displays, has not been fully explored yet. In the past, a few specialized appliances were developed to display visual cues in the periphery, each designed for a single specific use case only. A multi-purpose headset to exclusively augment peripheral vision did not exist yet. We introduce MoPeDT: Modular Peripheral Display Toolkit, a freely available, flexible, reconfigurable, and extendable headset to conduct peripheral vision research. MoPeDT can be built with a 3D printer and off-the-shelf components. It features multiple spatially configurable near-eye display modules and full 3D tracking inside and outside the lab. With our system, researchers and designers may easily develop and prototype novel peripheral vision interaction and visualization techniques. We demonstrate the versatility of our headset with several possible applications for spatial awareness, balance, interaction, feedback, and notifications. We conducted a small study to evaluate the usability of the system. We found that participants were largely not irritated by the peripheral cues, but the headset's comfort could be further improved. 
We also evaluated our system based on established heuristics for human-computer interaction toolkits to show how MoPeDT adapts to changing requirements, lowers the entry barrier for peripheral vision research, and facilitates expressive power in the combination of modular building blocks.", "abstracts": [ { "abstractType": "Regular", "content": "Peripheral vision plays a significant role in human perception and orientation. However, its relevance for human-computer interaction, especially head-mounted displays, has not been fully explored yet. In the past, a few specialized appliances were developed to display visual cues in the periphery, each designed for a single specific use case only. A multi-purpose headset to exclusively augment peripheral vision did not exist yet. We introduce MoPeDT: Modular Peripheral Display Toolkit, a freely available, flexible, reconfigurable, and extendable headset to conduct peripheral vision research. MoPeDT can be built with a 3D printer and off-the-shelf components. It features multiple spatially configurable near-eye display modules and full 3D tracking inside and outside the lab. With our system, researchers and designers may easily develop and prototype novel peripheral vision interaction and visualization techniques. We demonstrate the versatility of our headset with several possible applications for spatial awareness, balance, interaction, feedback, and notifications. We conducted a small study to evaluate the usability of the system. We found that participants were largely not irritated by the peripheral cues, but the headset's comfort could be further improved. 
We also evaluated our system based on established heuristics for human-computer interaction toolkits to show how MoPeDT adapts to changing requirements, lowers the entry barrier for peripheral vision research, and facilitates expressive power in the combination of modular building blocks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Peripheral vision plays a significant role in human perception and orientation. However, its relevance for human-computer interaction, especially head-mounted displays, has not been fully explored yet. In the past, a few specialized appliances were developed to display visual cues in the periphery, each designed for a single specific use case only. A multi-purpose headset to exclusively augment peripheral vision did not exist yet. We introduce MoPeDT: Modular Peripheral Display Toolkit, a freely available, flexible, reconfigurable, and extendable headset to conduct peripheral vision research. MoPeDT can be built with a 3D printer and off-the-shelf components. It features multiple spatially configurable near-eye display modules and full 3D tracking inside and outside the lab. With our system, researchers and designers may easily develop and prototype novel peripheral vision interaction and visualization techniques. We demonstrate the versatility of our headset with several possible applications for spatial awareness, balance, interaction, feedback, and notifications. We conducted a small study to evaluate the usability of the system. We found that participants were largely not irritated by the peripheral cues, but the headset's comfort could be further improved. 
We also evaluated our system based on established heuristics for human-computer interaction toolkits to show how MoPeDT adapts to changing requirements, lowers the entry barrier for peripheral vision research, and facilitates expressive power in the combination of modular building blocks.", "fno": "481500a691", "keywords": [ "Headphones", "Visualization", "Three Dimensional Displays", "Head Mounted Displays", "Prototypes", "Virtual Reality", "Printers", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interactive Systems And Tools X 2014 User Interface Toolkits" ], "authors": [ { "affiliation": "University of Konstanz,Germany", "fullName": "Matthias Albrecht", "givenName": "Matthias", "surname": "Albrecht", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Konstanz,Germany", "fullName": "Lorenz Assländer", "givenName": "Lorenz", "surname": "Assländer", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Konstanz,Germany", "fullName": "Harald Reiterer", "givenName": "Harald", "surname": "Reiterer", "__typename": "ArticleAuthorType" }, { "affiliation": "Coburg University of Applied Sciences and Arts,Germany", "fullName": "Stephan Streuber", "givenName": "Stephan", "surname": "Streuber", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "691-701", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgkVSunKM", "name": "pvr202348150-010108453s1-mm_481500a691.zip", "size": "45.7 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108453s1-mm_481500a691.zip", "__typename": "WebExtraType" } ], 
"adjacentArticles": { "previous": { "fno": "481500a680", "articleId": "1MNgp7L6LcY", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a703", "articleId": "1MNgWzXKbeg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2000/0478/0/04780233", "title": "Visuo-Haptic Display Using Head-Mounted Projector", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2009/3791/0/3791a043", "title": "Enhancing Presence in Head-Mounted Display Environments by Visual Body Feedback Using Head-Mounted Cameras", "doi": null, "abstractUrl": "/proceedings-article/cw/2009/3791a043/12OmNxveNRr", "parentPublication": { "id": "proceedings/cw/2009/3791/0", "title": "2009 International Conference on CyberWorlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446345", "title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/07/ttg2011070888", "title": "Natural Perspective Projections for Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a746", "title": "Depth Reduction 
in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a470", "title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a105", "title": "Real-Time Recognition of In-Place Body Actions and Head Gestures using Only a Head-Mounted Display", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a105/1MNgCnmbXyM", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089479", "title": "Directing versus Attracting Attention: Exploring the Effectiveness of Central and Peripheral Cues in Panoramic Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089479/1jIx8WJhSM0", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090625", "title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications", "doi": null, "abstractUrl": 
"/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2020/6497/0/649700a041", "title": "Differences in the Uncanny Valley between Head-Mounted Displays and Monitors", "doi": null, "abstractUrl": "/proceedings-article/cw/2020/649700a041/1olHyEEy8CY", "parentPublication": { "id": "proceedings/cw/2020/6497/0", "title": "2020 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1axfDU6mDyU", "title": "2018 IEEE 37th International Performance Computing and Communications Conference (IPCCC)", "acronym": "ipccc", "groupId": "1000548", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "1axfEr9uTyE", "doi": "10.1109/PCCC.2018.8710858", "title": "iPand: Accurate Gesture Input with Ambient Acoustic Sensing on Hand", "normalizedTitle": "iPand: Accurate Gesture Input with Ambient Acoustic Sensing on Hand", "abstract": "Finger gesture input is emerged as an increasingly popular means of human-computer interactions. In this paper, we propose iPand, an acoustic sensing system that enables finger gesture input on the skin, which is more convenient, user-friendly and always accessible. Unlike previous works, which implement gesture input with dedicated devices, our system exploits passive acoustic sensing to identify the gestures, e.g. swipe left, swipe right, pinch and spread. The insight of our system is that specific gesture emits unique friction sound, which can be captured by the microphone embedded in wearable devices. We capture these acoustic signals and extract the features by using bandpass filters and short-time Fourier Transform. The offline convolutional neural network is adopted to recognize the gestures. iPand is implemented and evaluated using COTS smartphones and smartwatches. Experiment results show that iPand can achieve the recognition accuracy of 89%, 83% and 78% in three daily scenarios (i.e., library, lab and cafe), respectively. Particularly, our system supports multi-touch function where 2-4 fingers are enabled for more efficient and expressive gesture input, and its average accuracy for individual finger gesture reaches up to 83% within 12 gestures.", "abstracts": [ { "abstractType": "Regular", "content": "Finger gesture input is emerged as an increasingly popular means of human-computer interactions. 
In this paper, we propose iPand, an acoustic sensing system that enables finger gesture input on the skin, which is more convenient, user-friendly and always accessible. Unlike previous works, which implement gesture input with dedicated devices, our system exploits passive acoustic sensing to identify the gestures, e.g. swipe left, swipe right, pinch and spread. The insight of our system is that specific gesture emits unique friction sound, which can be captured by the microphone embedded in wearable devices. We capture these acoustic signals and extract the features by using bandpass filters and short-time Fourier Transform. The offline convolutional neural network is adopted to recognize the gestures. iPand is implemented and evaluated using COTS smartphones and smartwatches. Experiment results show that iPand can achieve the recognition accuracy of 89%, 83% and 78% in three daily scenarios (i.e., library, lab and cafe), respectively. Particularly, our system supports multi-touch function where 2-4 fingers are enabled for more efficient and expressive gesture input, and its average accuracy for individual finger gesture reaches up to 83% within 12 gestures.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Finger gesture input is emerged as an increasingly popular means of human-computer interactions. In this paper, we propose iPand, an acoustic sensing system that enables finger gesture input on the skin, which is more convenient, user-friendly and always accessible. Unlike previous works, which implement gesture input with dedicated devices, our system exploits passive acoustic sensing to identify the gestures, e.g. swipe left, swipe right, pinch and spread. The insight of our system is that specific gesture emits unique friction sound, which can be captured by the microphone embedded in wearable devices. We capture these acoustic signals and extract the features by using bandpass filters and short-time Fourier Transform. 
The offline convolutional neural network is adopted to recognize the gestures. iPand is implemented and evaluated using COTS smartphones and smartwatches. Experiment results show that iPand can achieve the recognition accuracy of 89%, 83% and 78% in three daily scenarios (i.e., library, lab and cafe), respectively. Particularly, our system supports multi-touch function where 2-4 fingers are enabled for more efficient and expressive gesture input, and its average accuracy for individual finger gesture reaches up to 83% within 12 gestures.", "fno": "08710858", "keywords": [ "Acoustic Signal Processing", "Band Pass Filters", "Convolutional Neural Nets", "Feature Extraction", "Fourier Transforms", "Friction", "Gesture Recognition", "Human Computer Interaction", "Ambient Acoustic Sensing", "Finger Gesture Input", "Human Computer Interactions", "Acoustic Sensing System", "Passive Acoustic Sensing", "Acoustic Signals", "I Pand", "Friction Sound", "Features Extraction", "Bandpass Filters", "Short Time Fourier Transform", "Convolutional Neural Network", "Acoustics", "Sensors", "Feature Extraction", "Microphones", "Skin", "Friction", "Smart Phones" ], "authors": [ { "affiliation": "School of Computer Science and Technology, University of Science and Technology of China, Hefei, China", "fullName": "Shumin Cao", "givenName": "Shumin", "surname": "Cao", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Technology, University of Science and Technology of China, Hefei, China", "fullName": "Xin He", "givenName": "Xin", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Technology, University of Science and Technology of China, Hefei, China", "fullName": "Peide Zhu", "givenName": "Peide", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Communications Engineering, Army Engineering University, Nanjing, China", "fullName": "Mingshi Chen", "givenName": 
"Mingshi", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Technology, University of Science and Technology of China, Hefei, China", "fullName": "Xiangyang Li", "givenName": "Xiangyang", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Technology, University of Science and Technology of China, Hefei, China", "fullName": "Panlong Yang", "givenName": "Panlong", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "ipccc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-11-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2018", "issn": null, "isbn": "978-1-5386-6808-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08711309", "articleId": "1axfGOUQuEU", "__typename": "AdjacentArticleType" }, "next": { "fno": "08711069", "articleId": "1axfJrK4nHq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icccnt/2013/3926/0/06726505", "title": "Hand gesture to speech conversion using Matlab", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2013/06726505/12OmNqGA57o", "parentPublication": { "id": "proceedings/icccnt/2013/3926/0", "title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2017/2129/0/212901a121", "title": "SoundWrite II: Ambient Acoustic Sensing for Noise Tolerant Device-Free Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/icpads/2017/212901a121/12OmNs5rkPT", "parentPublication": { "id": "proceedings/icpads/2017/2129/0", "title": "2017 IEEE 23rd International Conference on Parallel and Distributed Systems (ICPADS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2015/8342/0/07057909", "title": "Design of Exhibition contents using swipe gesture recognition communication based on Kinect", "doi": null, "abstractUrl": "/proceedings-article/icoin/2015/07057909/12OmNx1qV33", "parentPublication": { "id": "proceedings/icoin/2015/8342/0", "title": "2015 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2016/7258/0/07552959", "title": "Hand gesture recognition based on canonical formed superpixel earth mover's distance", "doi": null, "abstractUrl": "/proceedings-article/icme/2016/07552959/12OmNxE2mLg", "parentPublication": { "id": "proceedings/icme/2016/7258/0", "title": "2016 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/case/2007/1153/0/04341729", "title": "Multiple-angle Hand Gesture Recognition by Fusing SVM Classifiers", "doi": null, "abstractUrl": "/proceedings-article/case/2007/04341729/12OmNxj234l", "parentPublication": { "id": "proceedings/case/2007/1153/0", "title": "3rd Annual IEEE Conference on Automation Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scan/2006/2821/0/04402402", "title": "Interval Fuzzy Rule-Based Hand Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/scan/2006/04402402/12OmNyRxFi0", "parentPublication": { "id": "proceedings/scan/2006/2821/0", "title": "Scientific Computing, Computer Arithmetic and Validated Numerics, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2019/5341/0/09001762", "title": "Multiplatform System for Hand Gesture Recognition", "doi": null, "abstractUrl": 
"/proceedings-article/isspit/2019/09001762/1hHMgoUFeHC", "parentPublication": { "id": "proceedings/isspit/2019/5341/0", "title": "2019 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/05/09229520", "title": "Push the Limit of Acoustic Gesture Recognition", "doi": null, "abstractUrl": "/journal/tm/2022/05/09229520/1o3njwLFEqI", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/07/09253526", "title": "UltraGesture: Fine-Grained Gesture Sensing and Recognition", "doi": null, "abstractUrl": "/journal/tm/2022/07/09253526/1oDXB816aGc", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382928", "title": "GestOnHMD: Enabling Gesture-based Interaction on Low-cost VR Head-Mounted Display", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382928/1saZuaAmvlu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNynsbxj", "title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)", "acronym": "svr", "groupId": "1800426", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNqBtiLI", "doi": "10.1109/SVR.2014.16", "title": "Tactile Interface for Navigation in Underground Mines", "normalizedTitle": "Tactile Interface for Navigation in Underground Mines", "abstract": "This paper presents the design and evaluation of a tactile vocabulary to aid navigation in an underground mine. We studied different ways to construct tactile vocabularies and assessed several tactile icons for aid navigation. After trying a dozen stimuli families, we have selected tactons based on the users' ability to perceive and process them during navigation in virtual environments to design a more usable tactile interface. Then, we performed a user experiment in a virtual simulation of an emergency situation in an underground mine. The user study shows that the tactile feedback facilitated the execution of the task. Also, the perceptual adjustment of the tactile vocabulary increased its usability as well as the memorization of its signals.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents the design and evaluation of a tactile vocabulary to aid navigation in an underground mine. We studied different ways to construct tactile vocabularies and assessed several tactile icons for aid navigation. After trying a dozen stimuli families, we have selected tactons based on the users' ability to perceive and process them during navigation in virtual environments to design a more usable tactile interface. Then, we performed a user experiment in a virtual simulation of an emergency situation in an underground mine. The user study shows that the tactile feedback facilitated the execution of the task. 
Also, the perceptual adjustment of the tactile vocabulary increased its usability as well as the memorization of its signals.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents the design and evaluation of a tactile vocabulary to aid navigation in an underground mine. We studied different ways to construct tactile vocabularies and assessed several tactile icons for aid navigation. After trying a dozen stimuli families, we have selected tactons based on the users' ability to perceive and process them during navigation in virtual environments to design a more usable tactile interface. Then, we performed a user experiment in a virtual simulation of an emergency situation in an underground mine. The user study shows that the tactile feedback facilitated the execution of the task. Also, the perceptual adjustment of the tactile vocabulary increased its usability as well as the memorization of its signals.", "fno": "4261a230", "keywords": [ "Computerised Navigation", "Force Feedback", "Haptic Interfaces", "Mining", "Virtual Reality", "Tactile Interface", "Navigation Aid", "Underground Mines", "Tactile Vocabulary", "Virtual Simulation", "Tactile Feedback", "Task Execution", "Emergency Situation", "Navigation", "Vocabulary", "Belts", "Tactile Sensors", "Games", "Fuel Processing Industries", "Haptic Interfaces", "Vibrotactile Communication", "3 D Navigation", "User Study" ], "authors": [ { "affiliation": "Inst. de Inf. (INF), Univ. Fed. do Rio Grande do Sul (UFRGS), Porto Alegre, Brazil", "fullName": "Victor Adriel de J. Oliveira", "givenName": "Victor Adriel de J.", "surname": "Oliveira", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. de Eng. de Minas, Univ. Fed. do Rio Grande do Sul (UFRGS), Porto Alegre, Brazil", "fullName": "Eduardo Marques", "givenName": "Eduardo", "surname": "Marques", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. de Eng. de Minas, Univ. Fed. 
do Rio Grande do Sul (UFRGS), Porto Alegre, Brazil", "fullName": "Rodrigo De Lemos Peroni", "givenName": "Rodrigo", "surname": "De Lemos Peroni", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. de Inf. (INF), Univ. Fed. do Rio Grande do Sul (UFRGS), Porto Alegre, Brazil", "fullName": "Anderson Maciel", "givenName": "Anderson", "surname": "Maciel", "__typename": "ArticleAuthorType" } ], "idPrefix": "svr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-05-01T00:00:00", "pubType": "proceedings", "pages": "230-237", "year": "2014", "issn": null, "isbn": "978-1-4799-4261-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4261a220", "articleId": "12OmNzBOie5", "__typename": "AdjacentArticleType" }, "next": { "fno": "4261a238", "articleId": "12OmNyeWdAX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isorc/2018/5847/0/584701a152", "title": "Zigbee Based Wireless Data Acquisition System for Underground Mines — A Feasibility Study", "doi": null, "abstractUrl": "/proceedings-article/isorc/2018/584701a152/12OmNs0TKPI", "parentPublication": { "id": "proceedings/isorc/2018/5847/0", "title": "2018 IEEE 21st International Symposium on Real-Time Distributed Computing (ISORC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icica/2014/3966/0/3966a305", "title": "Smart Helmet Using RF and WSN Technology for Underground Mines Safety", "doi": null, "abstractUrl": "/proceedings-article/icica/2014/3966a305/12OmNwudQMh", "parentPublication": { "id": "proceedings/icica/2014/3966/0", "title": "2014 International Conference on Intelligent Computing Applications (ICICA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2014/3624/0/06798878", "title": 
"Poster: Applying tactile languages for 3D navigation", "doi": null, "abstractUrl": "/proceedings-article/3dui/2014/06798878/12OmNx0A7Mz", "parentPublication": { "id": "proceedings/3dui/2014/3624/0", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2009/3890/0/3890a425", "title": "Touchable Video and Tactile Audio", "doi": null, "abstractUrl": "/proceedings-article/ism/2009/3890a425/12OmNxTmHIC", "parentPublication": { "id": "proceedings/ism/2009/3890/0", "title": "2009 11th IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2018/6580/0/658001a013", "title": "Navigation and Positioning System Applied in Underground Driverless Vehicle Based on IMU", "doi": null, "abstractUrl": "/proceedings-article/icris/2018/658001a013/12OmNxwENqw", "parentPublication": { "id": "proceedings/icris/2018/6580/0", "title": "2018 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/4353/2/05750971", "title": "Particle Filter Localization in Underground Mines Using UWB Ranging", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750971/12OmNzSQdpw", "parentPublication": { "id": "icicta/2011/4353/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2011/03/mpc2011030035", "title": "Feel Your Route: A Tactile Display for Car Navigation", "doi": null, "abstractUrl": "/magazine/pc/2011/03/mpc2011030035/13rRUIJcWiU", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1998/06/mcg1998060032", 
"title": "Selectively Stimulating Skin Receptors for Tactile Display", "doi": null, "abstractUrl": "/magazine/cg/1998/06/mcg1998060032/13rRUxN5evz", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2010/02/tth2010020078", "title": "Field-Based Validation of a Tactile Navigation Device", "doi": null, "abstractUrl": "/journal/th/2010/02/tth2010020078/13rRUxly8T8", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2018/7206/0/720600a031", "title": "A Wearable System for Situational Awareness Estimation in Underground Mines", "doi": null, "abstractUrl": "/proceedings-article/chase/2018/720600a031/181W9onlXJD", "parentPublication": { "id": "proceedings/chase/2018/7206/0", "title": "2018 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxX3uML", "title": "2009 11th IEEE International Symposium on Multimedia", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNxTmHIC", "doi": "10.1109/ISM.2009.79", "title": "Touchable Video and Tactile Audio", "normalizedTitle": "Touchable Video and Tactile Audio", "abstract": "We propose a Haptic Audio Visual System (HAVS) which consists of a touchable video and a tactile audio. Touchable video generates tactile feedback according to the user’s real-time interactions on the video. Tactile audio provides the tactile experiences through the automatic generation of vibro-tactile stimuli using a sound detection algorithm. The video screen is divided into a grid, and each cell has customizable tactile information which corresponds to the visual content. At the same time, the sound stream is also analyzed and customizable vibration responses are synchronized to specific sound effects or audio \"signatures\". One of the important advantages of HAVS is the ability to augment and enhance immersiveness and the interactivity of users in 3D virtual environments. The application of HAVS to commercial, military, educational and medical contexts including: home shopping, online gaming, interactive broadcasting, teaching/learning and teleoperator settings will be discussed.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a Haptic Audio Visual System (HAVS) which consists of a touchable video and a tactile audio. Touchable video generates tactile feedback according to the user’s real-time interactions on the video. Tactile audio provides the tactile experiences through the automatic generation of vibro-tactile stimuli using a sound detection algorithm. The video screen is divided into a grid, and each cell has customizable tactile information which corresponds to the visual content. 
At the same time, the sound stream is also analyzed and customizable vibration responses are synchronized to specific sound effects or audio \"signatures\". One of the important advantages of HAVS is the ability to augment and enhance immersiveness and the interactivity of users in 3D virtual environments. The application of HAVS to commercial, military, educational and medical contexts including: home shopping, online gaming, interactive broadcasting, teaching/learning and teleoperator settings will be discussed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a Haptic Audio Visual System (HAVS) which consists of a touchable video and a tactile audio. Touchable video generates tactile feedback according to the user’s real-time interactions on the video. Tactile audio provides the tactile experiences through the automatic generation of vibro-tactile stimuli using a sound detection algorithm. The video screen is divided into a grid, and each cell has customizable tactile information which corresponds to the visual content. At the same time, the sound stream is also analyzed and customizable vibration responses are synchronized to specific sound effects or audio \"signatures\". One of the important advantages of HAVS is the ability to augment and enhance immersiveness and the interactivity of users in 3D virtual environments. 
The application of HAVS to commercial, military, educational and medical contexts including: home shopping, online gaming, interactive broadcasting, teaching/learning and teleoperator settings will be discussed.", "fno": "3890a425", "keywords": [ "Haptic Video", "Haptic Audio", "Touchable Video", "Tactile Audio" ], "authors": [ { "affiliation": null, "fullName": "Mee Young Sung", "givenName": "Mee Young", "surname": "Sung", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kyungkoo Jun", "givenName": "Kyungkoo", "surname": "Jun", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dongju Ji", "givenName": "Dongju", "surname": "Ji", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hwanmun Lee", "givenName": "Hwanmun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kikwon Kim", "givenName": "Kikwon", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-12-01T00:00:00", "pubType": "proceedings", "pages": "425-431", "year": "2009", "issn": null, "isbn": "978-0-7695-3890-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3890a418", "articleId": "12OmNwpXRY7", "__typename": "AdjacentArticleType" }, "next": { "fno": "3890a432", "articleId": "12OmNCmpcJq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2007/2928/0/29280097", "title": "Video Game Console Audio: Evolution and Future Trends", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2007/29280097/12OmNAhxjD3", "parentPublication": { "id": "proceedings/cgiv/2007/2928/0", "title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, 
{ "id": "proceedings/mue/2008/3134/0/3134a282", "title": "Audio-Based Video Editing with Two-Channel Microphone", "doi": null, "abstractUrl": "/proceedings-article/mue/2008/3134a282/12OmNApu5E0", "parentPublication": { "id": "proceedings/mue/2008/3134/0", "title": "Multimedia and Ubiquitous Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223432", "title": "Various forms of tactile feedback displayed on the back of the tablet: Latency minimized by using audio signal to control actuators", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223432/12OmNB9bvqr", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2010/6821/0/05444682", "title": "Redundant coding of simulated tactile key clicks with audio signals", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444682/12OmNBCqbIT", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2010/4270/0/4270a831", "title": "Design of Audio and Video Multiplexing Based on Intellectual Property Module", "doi": null, "abstractUrl": "/proceedings-article/iccis/2010/4270a831/12OmNCm7BMn", "parentPublication": { "id": "proceedings/iccis/2010/4270/0", "title": "2010 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/delta/2002/1453/0/14530063", "title": "Reconfigurable DSP's for Efficient MPEG-4 Video and Audio Decoding", "doi": null, "abstractUrl": 
"/proceedings-article/delta/2002/14530063/12OmNvT2paY", "parentPublication": { "id": "proceedings/delta/2002/1453/0", "title": "Proceedings First IEEE International Workshop on Electronic Design, Test and Applications '2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itcs/2009/3688/2/3688b575", "title": "Research on Audio-Video Materials Managing System Based on Content Management", "doi": null, "abstractUrl": "/proceedings-article/itcs/2009/3688b575/12OmNwD1q4W", "parentPublication": { "id": "proceedings/itcs/2009/3688/2", "title": "Information Technology and Computer Science, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2005/9331/0/01521400", "title": "Audio, video and audio-visual signatures for short video clip detection: experiments on Trecvid2003", "doi": null, "abstractUrl": "/proceedings-article/icme/2005/01521400/12OmNxR5UKg", "parentPublication": { "id": "proceedings/icme/2005/9331/0", "title": "2005 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcs/1999/0253/2/02530225", "title": "Joint Audio-Video Processing of MPEG Encoded Sequences", "doi": null, "abstractUrl": "/proceedings-article/icmcs/1999/02530225/12OmNzayNcg", "parentPublication": { "id": "proceedings/icmcs/1999/0253/2", "title": "Multimedia Computing and Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1998/06/mcg1998060032", "title": "Selectively Stimulating Skin Receptors for Tactile Display", "doi": null, "abstractUrl": "/magazine/cg/1998/06/mcg1998060032/13rRUxN5evz", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "12OmNx8Ounz", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "acronym": "haptics", "groupId": "1000312", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNxveNHK", "doi": "10.1109/HAPTIC.2010.5444650", "title": "A finger attachment to generate tactile feedback and make 3D gesture detectable by touch panel sensor", "normalizedTitle": "A finger attachment to generate tactile feedback and make 3D gesture detectable by touch panel sensor", "abstract": "In this paper, we introduce a simple device called Haptic Adapter which is attached to a finger and generates tactile feedback to the finger during touch panel operation. It also converts a finger action in the 3D space into a sequence of contact points on the 2D touch panel surface. Prototypes of Haptic Adapter have been designed and applied to such problems as text inputting, web browsing and virtual gaming. Effectiveness of its tactile feedback has been evaluated with the prototypes and their applications. The tactile feedback is used to guide the finger operation on the touch panel so that the finger operation is confirmed with the tactile sense and the input-error is reduced through the confirmation. It is also used to give realistic tactile feeling while playing games on the touch panel. Samples of such games are also demonstrated.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we introduce a simple device called Haptic Adapter which is attached to a finger and generates tactile feedback to the finger during touch panel operation. It also converts a finger action in the 3D space into a sequence of contact points on the 2D touch panel surface. Prototypes of Haptic Adapter have been designed and applied to such problems as text inputting, web browsing and virtual gaming. 
Effectiveness of its tactile feedback has been evaluated with the prototypes and their applications. The tactile feedback is used to guide the finger operation on the touch panel so that the finger operation is confirmed with the tactile sense and the input-error is reduced through the confirmation. It is also used to give realistic tactile feeling while playing games on the touch panel. Samples of such games are also demonstrated.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we introduce a simple device called Haptic Adapter which is attached to a finger and generates tactile feedback to the finger during touch panel operation. It also converts a finger action in the 3D space into a sequence of contact points on the 2D touch panel surface. Prototypes of Haptic Adapter have been designed and applied to such problems as text inputting, web browsing and virtual gaming. Effectiveness of its tactile feedback has been evaluated with the prototypes and their applications. The tactile feedback is used to guide the finger operation on the touch panel so that the finger operation is confirmed with the tactile sense and the input-error is reduced through the confirmation. It is also used to give realistic tactile feeling while playing games on the touch panel. 
Samples of such games are also demonstrated.", "fno": "05444650", "keywords": [ "Computer Games", "Gesture Recognition", "Haptic Interfaces", "Internet", "Tactile Sensors", "Touch Sensitive Screens", "Finger Attachment", "Tactile Feedback", "3 D Gesture Detection", "Touch Panel Sensor", "Haptic Adapter", "Text Inputting", "Web Browsing", "Virtual Gaming", "Fingers", "Feedback", "Tactile Sensors", "Haptic Interfaces", "Space Technology", "User Interfaces", "Pressing", "Image Sensors", "Laboratories", "Virtual Prototyping", "Virtual Reality", "Haptic I O", "Input Devices And Strategies", "User Interfaces", "Games", "Human Factors" ], "authors": [ { "affiliation": "Imaging Science and Engineering Laboratory, Tokyo Institute of Technology, Japan", "fullName": "Itsuo Kumazawa", "givenName": "Itsuo", "surname": "Kumazawa", "__typename": "ArticleAuthorType" } ], "idPrefix": "haptics", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-03-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2010", "issn": "2324-7347", "isbn": "978-1-4244-6821-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05444653", "articleId": "12OmNqJ8tn9", "__typename": "AdjacentArticleType" }, "next": { "fno": "05444651", "articleId": "12OmNxWLTH6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223432", "title": "Various forms of tactile feedback displayed on the back of the tablet: Latency minimized by using audio signal to control actuators", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223432/12OmNB9bvqr", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2007/0907/0/04142855", "title": 
"Tactile Feedback at the Finger Tips for Improved Direct Interaction in Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2007/04142855/12OmNBWi6KF", "parentPublication": { "id": "proceedings/3dui/2007/0907/0", "title": "2007 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1992/2720/0/00220165", "title": "Development of a finger-shaped tactile sensor and its evaluation by active touch", "doi": null, "abstractUrl": "/proceedings-article/robot/1992/00220165/12OmNweBUKa", "parentPublication": { "id": "proceedings/robot/1992/2720/0", "title": "Proceedings 1992 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549418", "title": "An actuated stage for a tablet computer: Generation of tactile feedback and communication using the motion of the whole tablet", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549418/12OmNxFsmGl", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2010/6821/0/05444685", "title": "Human vs. 
robotic tactile sensing: Detecting lumps in soft tissue", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444685/12OmNxTVU0U", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/01/07234937", "title": "Rich Pinch: Perception of Object Movement with Tactile Illusion", "doi": null, "abstractUrl": "/journal/th/2016/01/07234937/13rRUEgarnR", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2022/3418/0/341800a115", "title": "Toward a Tactile Ontology for Semantic Interoperability of the Tactile Internet", "doi": null, "abstractUrl": "/proceedings-article/icsc/2022/341800a115/1BYIqKYO4co", "parentPublication": { "id": "proceedings/icsc/2022/3418/0", "title": "2022 IEEE 16th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a304", "title": "IMPReSS: Improved Multi-Touch Progressive Refinement Selection Strategy", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a304/1CJetSxfyi4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a694", "title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84", "parentPublication": { "id": 
"proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a646", "title": "Transcutaneous Electrical Nerve Stimulation along the Base of the Finger to Modify the Location of Tactile Sensation at the Finger", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a646/1J7WebwvRgA", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgohS", "title": "2018 Nicograph International (NicoInt)", "acronym": "nicoint", "groupId": "1814784", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1AITnav", "doi": "10.1109/NICOINT.2018.00038", "title": "The Development of a Virtual Doll Companion for Haptic Interaction", "normalizedTitle": "The Development of a Virtual Doll Companion for Haptic Interaction", "abstract": "The main purpose of virtual reality is to immerse a user in a virtual environment. However, the most common criticism of the commodity VR controllers is that they are not able to provide effective tactile feedback. In this paper, we propose a 9-DOF sensor driven haptic feedback companion which is here to bridge the virtual and physical worlds. A 9-DoF sensor through UDP protocol allows us to display the doll's position in real-time. Touch sensor enabled servo motor gives users the effective haptic feedback. When the doll shows different facial expressions in the virtual scenarios, the physical head will have different kinds of rotations in real-time.", "abstracts": [ { "abstractType": "Regular", "content": "The main purpose of virtual reality is to immerse a user in a virtual environment. However, the most common criticism of the commodity VR controllers is that they are not able to provide effective tactile feedback. In this paper, we propose a 9-DOF sensor driven haptic feedback companion which is here to bridge the virtual and physical worlds. A 9-DoF sensor through UDP protocol allows us to display the doll's position in real-time. Touch sensor enabled servo motor gives users the effective haptic feedback. When the doll shows different facial expressions in the virtual scenarios, the physical head will have different kinds of rotations in real-time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The main purpose of virtual reality is to immerse a user in a virtual environment. 
However, the most common criticism of the commodity VR controllers is that they are not able to provide effective tactile feedback. In this paper, we propose a 9-DOF sensor driven haptic feedback companion which is here to bridge the virtual and physical worlds. A 9-DoF sensor through UDP protocol allows us to display the doll's position in real-time. Touch sensor enabled servo motor gives users the effective haptic feedback. When the doll shows different facial expressions in the virtual scenarios, the physical head will have different kinds of rotations in real-time.", "fno": "690901a092", "keywords": [ "Haptic Interfaces", "Tactile Sensors", "Transport Protocols", "Virtual Reality", "Virtual Doll Companion", "Haptic Interaction", "Virtual Reality", "Virtual Environment", "Commodity VR Controllers", "Sensor Driven Haptic Feedback Companion", "Virtual Worlds", "Physical Worlds", "9 Do F Sensor", "UDP Protocol", "Touch Sensor", "Tactile Feedback", "Haptic Interfaces", "Protocols", "Virtual Environments", "Tactile Sensors", "Real Time Systems", "Servomotors", "Art", "Haptic", "Interaction", "Tactile Sensation", "Virtual Companion", "Character Behavior" ], "authors": [ { "affiliation": null, "fullName": "Jen-Tun Lee", "givenName": "Jen-Tun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "R.P.C. Janaka Rajapakse", "givenName": "R.P.C. 
Janaka", "surname": "Rajapakse", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yi-Ping Hung", "givenName": "Yi-Ping", "surname": "Hung", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yoshimasa Tokuyama", "givenName": "Yoshimasa", "surname": "Tokuyama", "__typename": "ArticleAuthorType" } ], "idPrefix": "nicoint", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "92-92", "year": "2018", "issn": null, "isbn": "978-1-5386-6909-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "690901a091", "articleId": "13bd1h03qOH", "__typename": "AdjacentArticleType" }, "next": { "fno": "690901a093", "articleId": "13bd1gFCjrQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icma/2010/4293/0/4293a271", "title": "Interactive Forces Analysis and Haptic Modeling for Virtual Prototyping and Product Development", "doi": null, "abstractUrl": "/proceedings-article/icma/2010/4293a271/12OmNARRYji", "parentPublication": { "id": "proceedings/icma/2010/4293/0", "title": "2010 International Conference on Manufacturing Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223325", "title": "Elastic-Arm: Human-scale passive haptic feedback for augmenting interaction and perception in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223325/12OmNrFTrba", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/1999/0234/0/02340029", "title": "Development of Stereoscopic-Haptic Virtual Environments", "doi": null, "abstractUrl": 
"/proceedings-article/cbms/1999/02340029/12OmNwHz090", "parentPublication": { "id": "proceedings/cbms/1999/0234/0", "title": "Proceedings 12th IEEE Symposium on Computer-Based Medical Systems (Cat. No.99CB36365)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/04/07124519", "title": "Displaying Sensed Tactile Cues with a Fingertip Haptic Device", "doi": null, "abstractUrl": "/journal/th/2015/04/07124519/13rRUxC0SWk", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/11/ttg2011111714", "title": "Six Degrees-of-Freedom Haptic Interaction with Fluids", "doi": null, "abstractUrl": "/journal/tg/2011/11/ttg2011111714/13rRUxNW1Zj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/03/07784835", "title": "A 3-RSR Haptic Wearable Device for Rendering Fingertip Contact Forces", "doi": null, "abstractUrl": "/journal/th/2017/03/07784835/13rRUxZ0o1H", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798139", "title": "Human, Virtual Human, Bump! 
A Preliminary Study on Haptic Feedback", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798139/1cJ157IzTri", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089598", "title": "Implementation and Evaluation of Touch-based Interaction Using Electrovibration Haptic Feedback in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089598/1jIxb4ZNizS", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a239", "title": "Haptic Handshank – A Handheld Multimodal Haptic Feedback Controller for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a239/1pysvf0EzfO", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1h03qOn", "doi": "10.1109/VR.2018.8446054", "title": "Keynote Speaker Tactile Reality", "normalizedTitle": "Keynote Speaker Tactile Reality", "abstract": "Touching an object causes rich haptic cues that enable you to understand the object's physical properties and adeptly control the interaction. Although human experience centers on physical contact with tangible items, few computer systems provide the user with high-fidelity touch feedback, limiting their intuitiveness. Haptic interfaces are mechatronic systems that modulate the physical interaction between a human and his or her tangible surroundings. Such interfaces typically involve mechanical, electrical, and computational layers that work together to sense user motions or forces, quickly process these inputs with other information, and physically respond by actuating elements of the user's surroundings. By way of three examples, this talk will demonstrate that well-designed tactile feedback can greatly increase the realism of virtual worlds. First, we created a simple visuo-audio-tactile simulator to help dental students learn to find cavities in teeth. The user watches a video of a real dental tool interacting with a tooth while simultaneously feeling an authentic rendering of the associated contact vibrations. Second, we created the world's most realistic haptic virtual surfaces by recording and modeling what a user feels when touching 100 real objects with an instrumented stylus. The perceptual effects of displaying the resulting data-driven friction forces, tapping transients, and texture vibrations were quantified by having users compare the original surfaces to their virtual versions. 
Third, we extended the haptic texture concept to capture how a real robot vibrates as it moves its joints and tied this model to measured user motions. The resulting vibrotactile experiences were formally evaluated and then added to an immersive game that lets the user feel what it would be like to turn into a robot. While much work remains to be done, we are starting to see the tantalizing potential of systems that leverage tactile cues to allow a user to interact with virtual environments as though they were real.", "abstracts": [ { "abstractType": "Regular", "content": "Touching an object causes rich haptic cues that enable you to understand the object's physical properties and adeptly control the interaction. Although human experience centers on physical contact with tangible items, few computer systems provide the user with high-fidelity touch feedback, limiting their intuitiveness. Haptic interfaces are mechatronic systems that modulate the physical interaction between a human and his or her tangible surroundings. Such interfaces typically involve mechanical, electrical, and computational layers that work together to sense user motions or forces, quickly process these inputs with other information, and physically respond by actuating elements of the user's surroundings. By way of three examples, this talk will demonstrate that well-designed tactile feedback can greatly increase the realism of virtual worlds. First, we created a simple visuo-audio-tactile simulator to help dental students learn to find cavities in teeth. The user watches a video of a real dental tool interacting with a tooth while simultaneously feeling an authentic rendering of the associated contact vibrations. Second, we created the world's most realistic haptic virtual surfaces by recording and modeling what a user feels when touching 100 real objects with an instrumented stylus. 
The perceptual effects of displaying the resulting data-driven friction forces, tapping transients, and texture vibrations were quantified by having users compare the original surfaces to their virtual versions. Third, we extended the haptic texture concept to capture how a real robot vibrates as it moves its joints and tied this model to measured user motions. The resulting vibrotactile experiences were formally evaluated and then added to an immersive game that lets the user feel what it would be like to turn into a robot. While much work remains to be done, we are starting to see the tantalizing potential of systems that leverage tactile cues to allow a user to interact with virtual environments as though they were real.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Touching an object causes rich haptic cues that enable you to understand the object's physical properties and adeptly control the interaction. Although human experience centers on physical contact with tangible items, few computer systems provide the user with high-fidelity touch feedback, limiting their intuitiveness. Haptic interfaces are mechatronic systems that modulate the physical interaction between a human and his or her tangible surroundings. Such interfaces typically involve mechanical, electrical, and computational layers that work together to sense user motions or forces, quickly process these inputs with other information, and physically respond by actuating elements of the user's surroundings. By way of three examples, this talk will demonstrate that well-designed tactile feedback can greatly increase the realism of virtual worlds. First, we created a simple visuo-audio-tactile simulator to help dental students learn to find cavities in teeth. The user watches a video of a real dental tool interacting with a tooth while simultaneously feeling an authentic rendering of the associated contact vibrations. 
Second, we created the world's most realistic haptic virtual surfaces by recording and modeling what a user feels when touching 100 real objects with an instrumented stylus. The perceptual effects of displaying the resulting data-driven friction forces, tapping transients, and texture vibrations were quantified by having users compare the original surfaces to their virtual versions. Third, we extended the haptic texture concept to capture how a real robot vibrates as it moves its joints and tied this model to measured user motions. The resulting vibrotactile experiences were formally evaluated and then added to an immersive game that lets the user feel what it would be like to turn into a robot. While much work remains to be done, we are starting to see the tantalizing potential of systems that leverage tactile cues to allow a user to interact with virtual environments as though they were real.", "fno": "08446054", "keywords": [ "Dentistry", "Friction", "Haptic Interfaces", "Mechatronics", "Rendering Computer Graphics", "Virtual Reality", "Virtual Worlds", "Simple Visuo Audio Tactile Simulator", "Dental Students", "Authentic Rendering", "Instrumented Stylus", "Perceptual Effects", "Texture Vibrations", "Haptic Texture Concept", "Measured User Motions", "Virtual Environments", "Keynote Speaker Tactile Reality", "Rich Haptic Cues", "Physical Properties", "Human Experience Centers", "Physical Contact", "Tangible Items", "Computer Systems", "High Fidelity Touch Feedback", "Haptic Interfaces", "Mechatronic Systems", "Physical Interaction", "Tangible Surroundings", "Mechanical Layers", "Computational Layers", "Actuating Elements", "Tactile Feedback", "Dental Tool", "Tactile Cues", "Electrical Layers", "Contact Vibrations", "Data Driven Friction Forces", "Vibrotactile Experiences", "Vibrations", "Solid Modeling", "Virtual Environments", "Teeth", "Vibration Measurement", "User Interfaces", "Haptic Interfaces" ], "authors": [ { "affiliation": "Max Planck Institute for 
Intelligent Systems", "fullName": "Katherine J. Kuchenbecker", "givenName": "Katherine J.", "surname": "Kuchenbecker", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "1-9", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446505", "articleId": "13bd1gzWkRi", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446476", "articleId": "13bd1gFCjrF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892341", "title": "Classification method of tactile feeling using stacked autoencoder based on haptic primary colors", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892341/12OmNA14Ae6", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisa/2012/1402/0/06220964", "title": "Visual, Haptic, and Auditory Realities Based Dental Training Simulator", "doi": null, "abstractUrl": "/proceedings-article/icisa/2012/06220964/12OmNAle6mY", "parentPublication": { "id": "proceedings/icisa/2012/1402/0", "title": "2012 International Conference on Information Science and Applications (ICISA 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/Ismar-mashd/2015/9628/0/9628a051", "title": "A Novel Haptic Vibration Media and Its Application", "doi": null, "abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a051/12OmNCgJe7j", "parentPublication": { "id": "proceedings/Ismar-mashd/2015/9628/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, 
Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2014/7100/0/07073274", "title": "A study on the design and effectiveness of tactile feedback in driving simulator", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2014/07073274/12OmNCyBXk4", "parentPublication": { "id": "proceedings/aiccsa/2014/7100/0", "title": "2014 IEEE/ACS 11th International Conference on Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2016/3906/0/3906a424", "title": "Application of a 3D Haptic Virtual Reality Simulation System for Dental Crown Preparation Training", "doi": null, "abstractUrl": "/proceedings-article/itme/2016/3906a424/12OmNrAMEPv", "parentPublication": { "id": "proceedings/itme/2016/3906/0", "title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wse/2010/8637/0/05623571", "title": "Introducing haptic interactions in web application modeling", "doi": null, "abstractUrl": "/proceedings-article/wse/2010/05623571/12OmNzd7bpI", "parentPublication": { "id": "proceedings/wse/2010/8637/0", "title": "12th IEEE International Symposium on Web Systems Evolution (WSE 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/01/07539397", "title": 
"Importance of Matching Physical Friction, Hardness, and Texture in Creating Realistic Haptic Virtual Surfaces", "doi": null, "abstractUrl": "/journal/th/2017/01/07539397/13rRUxAAT7O", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/02/tth2011020088", "title": "Exploration of Tactile Contact in a Haptic Display: Effects of Contact Velocity and Transient Vibrations", "doi": null, "abstractUrl": "/journal/th/2011/02/tth2011020088/13rRUxE04tK", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09117062", "title": "Augmenting Perceived Softness of Haptic Proxy Objects Through Transient Vibration and Visuo-Haptic Illusion in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2021/12/09117062/1kGg69DDrFe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7W7VSYX84", "doi": "10.1109/ISMAR-Adjunct57072.2022.00122", "title": "Effects of Tactile Feedback on Conceptual Understanding of Electromagnetism in a Virtual Reality Experience", "normalizedTitle": "Effects of Tactile Feedback on Conceptual Understanding of Electromagnetism in a Virtual Reality Experience", "abstract": "This research project aimed to investigate the effect of a virtual reality (VR) environment and tactile feedback on students&#x0027; con-ceptual understanding of electromagnetism. In our developed ap-plication, we simulated the physics concept of electromagnetism through charged particles and their interaction through field lines and isosurfaces in 3D. We divided interactions with virtual particles into four scenarios: 1) interaction between two positively charged particles; 2) interaction between two negatively charged particles; 3) interaction between one positively and one negatively charged particle; and 4) interaction among three particles, one positively and two negatively charged. We conducted a between-group study in which undergraduate students (n &#x003D; 41) experienced either only visual feedback (n &#x003D; 20) or simultaneous visual and haptic feedback (n &#x003D; 21). We found significant differences (p-value &#x003C;. 05) regarding knowledge gain in both the pretest and posttest. However, we did not find significant differences in the posttest between conditions, but the group assigned the simultaneous feedback condition indicated that tactile feedback helped them understand the electric fields. 
In this paper, we discuss our results&#x0027; implications in designing a VR learning environment.", "abstracts": [ { "abstractType": "Regular", "content": "This research project aimed to investigate the effect of a virtual reality (VR) environment and tactile feedback on students&#x0027; con-ceptual understanding of electromagnetism. In our developed ap-plication, we simulated the physics concept of electromagnetism through charged particles and their interaction through field lines and isosurfaces in 3D. We divided interactions with virtual particles into four scenarios: 1) interaction between two positively charged particles; 2) interaction between two negatively charged particles; 3) interaction between one positively and one negatively charged particle; and 4) interaction among three particles, one positively and two negatively charged. We conducted a between-group study in which undergraduate students (n &#x003D; 41) experienced either only visual feedback (n &#x003D; 20) or simultaneous visual and haptic feedback (n &#x003D; 21). We found significant differences (p-value &#x003C;. 05) regarding knowledge gain in both the pretest and posttest. However, we did not find significant differences in the posttest between conditions, but the group assigned the simultaneous feedback condition indicated that tactile feedback helped them understand the electric fields. In this paper, we discuss our results&#x0027; implications in designing a VR learning environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This research project aimed to investigate the effect of a virtual reality (VR) environment and tactile feedback on students' con-ceptual understanding of electromagnetism. In our developed ap-plication, we simulated the physics concept of electromagnetism through charged particles and their interaction through field lines and isosurfaces in 3D. 
We divided interactions with virtual particles into four scenarios: 1) interaction between two positively charged particles; 2) interaction between two negatively charged particles; 3) interaction between one positively and one negatively charged particle; and 4) interaction among three particles, one positively and two negatively charged. We conducted a between-group study in which undergraduate students (n = 41) experienced either only visual feedback (n = 20) or simultaneous visual and haptic feedback (n = 21). We found significant differences (p-value <. 05) regarding knowledge gain in both the pretest and posttest. However, we did not find significant differences in the posttest between conditions, but the group assigned the simultaneous feedback condition indicated that tactile feedback helped them understand the electric fields. In this paper, we discuss our results' implications in designing a VR learning environment.", "fno": "536500a588", "keywords": [ "Computer Aided Instruction", "Feedback", "Haptic Interfaces", "Virtual Reality", "Charged Particles", "Developed Ap Plication", "Haptic Feedback", "Negatively Charged Particle", "Simultaneous Feedback Condition", "Simultaneous Visual Feedback", "Tactile Feedback", "Virtual Particles", "Virtual Reality Environment", "Virtual Reality Experience", "Vibrations", "Visualization", "Three Dimensional Displays", "Tactile Sensors", "Virtual Environments", "Haptic Interfaces", "Electric Fields", "Human Centered Computing Human Computer In Teraction HCI HCI Design And Evaluation Methods User Studies", "Education Computer Graphics Graphics Systems And Interfaces Virtual Reality" ], "authors": [ { "affiliation": "Purdue University", "fullName": "Pedro Acevedo", "givenName": "Pedro", "surname": "Acevedo", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Alejandra Magana", "givenName": "Alejandra", "surname": "Magana", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue 
University", "fullName": "Christos Mousas", "givenName": "Christos", "surname": "Mousas", "__typename": "ArticleAuthorType" }, { "affiliation": "Costa Rica Institute of Technology", "fullName": "Yoselyn Walsh", "givenName": "Yoselyn", "surname": "Walsh", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Hector Will Pinto", "givenName": "Hector Will", "surname": "Pinto", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Bedrich Benes", "givenName": "Bedrich", "surname": "Benes", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "588-593", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a582", "articleId": "1J7WlClqJCo", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a594", "articleId": "1J7WjkWVarS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2014/2871/0/06802106", "title": "An ungrounded tactile feedback device to portray force and torque-like interactions in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802106/12OmNAg7jYE", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892304", "title": "Tactile feedback enhanced with discharged elastic energy and its effectiveness for in-air key-press and swipe operations", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892304/12OmNqzcvSl", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE 
Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/achi/2009/3529/0/3529a234", "title": "Reconfiguration of Vibro-tactile Feedback Based on Drivers' Sitting Attitude", "doi": null, "abstractUrl": "/proceedings-article/achi/2009/3529a234/12OmNyKa6bz", "parentPublication": { "id": "proceedings/achi/2009/3529/0", "title": "International Conference on Advances in Computer-Human Interaction", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/01/07234937", "title": "Rich Pinch: Perception of Object Movement with Tactile Illusion", "doi": null, "abstractUrl": "/journal/th/2016/01/07234937/13rRUEgarnR", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2009/02/tth2009020103", "title": "Tactile Feedback Induces Reduced Grasping Force in Robot-Assisted Surgery", "doi": null, "abstractUrl": "/journal/th/2009/02/tth2009020103/13rRUwInvla", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2012/03/tth2012030240", "title": "Evaluation of Tactile Feedback Methods for Wrist Rotation Guidance", "doi": null, "abstractUrl": "/journal/th/2012/03/tth2012030240/13rRUyoPSPg", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cscloud-edgecom/2019/1661/0/166100a151", "title": "Efficient Shoulder Surfing Resistant PIN Authentication Scheme Based on Localized Tactile Feedback", "doi": null, "abstractUrl": "/proceedings-article/cscloud-edgecom/2019/166100a151/1dPoFIuXiJa", "parentPublication": { "id": "proceedings/cscloud-edgecom/2019/1661/0", "title": "2019 6th IEEE International Conference on Cyber Security and Cloud Computing (CSCloud)/ 2019 5th IEEE International Conference on Edge Computing and Scalable Cloud (EdgeCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2019/5606/0/560600a094", "title": "A Survey of Mid-Air Ultrasonic Tactile Feedback", "doi": null, "abstractUrl": "/proceedings-article/ism/2019/560600a094/1gFJdA6C4TK", "parentPublication": { "id": "proceedings/ism/2019/5606/0", "title": "2019 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a350", "title": "Investigating Remote Tactile Feedback for Mid-Air Text-Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a350/1pysyvL4CwU", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKir3", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "acronym": "icmew", "groupId": "1801805", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WK5AoH", "doi": "10.1109/ICMEW.2018.8551511", "title": "Eye Tracking-Based 360 Vr Foveated/Tiled Video Rendering", "normalizedTitle": "Eye Tracking-Based 360 Vr Foveated/Tiled Video Rendering", "abstract": "To increase the sense of immersion of 360 virtual reality (VR) images, this paper proposes and implements the foveated rendering technology through precise region-of-interest (ROI) detection using eye-tracking-based head-mounted display. It uses HEVC tiled video-based image-decoding and -rendering method, which results show high rendering speeds and high-quality textures.", "abstracts": [ { "abstractType": "Regular", "content": "To increase the sense of immersion of 360 virtual reality (VR) images, this paper proposes and implements the foveated rendering technology through precise region-of-interest (ROI) detection using eye-tracking-based head-mounted display. It uses HEVC tiled video-based image-decoding and -rendering method, which results show high rendering speeds and high-quality textures.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To increase the sense of immersion of 360 virtual reality (VR) images, this paper proposes and implements the foveated rendering technology through precise region-of-interest (ROI) detection using eye-tracking-based head-mounted display. 
It uses HEVC tiled video-based image-decoding and -rendering method, which results show high rendering speeds and high-quality textures.", "fno": "08551511", "keywords": [ "Eye Tracking", "360 Video VR", "Tiled Video" ], "authors": [ { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "Hyunwook Kim", "givenName": "Hyunwook", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "JinWook Yang", "givenName": "JinWook", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "Junsuk Lee", "givenName": "Junsuk", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "Sangpil Yoon", "givenName": "Sangpil", "surname": "Yoon", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "Youngwha Kim", "givenName": "Youngwha", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "Minsu Choi", "givenName": "Minsu", "surname": "Choi", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "Jaeyoung Yang", "givenName": "Jaeyoung", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Engineering, Gachon Universit", "fullName": "Eun-Seok Ryu", "givenName": "Eun-Seok", "surname": "Ryu", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea Electronics Technology Institute (KETI)", "fullName": "Woochool Park", "givenName": "Woochool", "surname": "Park", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmew", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-07-01T00:00:00", "pubType": "proceedings", "pages": "1-1", 
"year": "2018", "issn": null, "isbn": "978-1-5386-4195-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08551567", "articleId": "17D45WaTkpi", "__typename": "AdjacentArticleType" }, "next": { "fno": "08551539", "articleId": "17D45W1Oa5H", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/etvis/2016/4731/0/07851170", "title": "An analysis of eye-tracking data in foveated ray tracing", "doi": null, "abstractUrl": "/proceedings-article/etvis/2016/07851170/12OmNvT2pjL", "parentPublication": { "id": "proceedings/etvis/2016/4731/0", "title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a756", "title": "Rectangular Mapping-based Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a471", "title": "Locomotion-aware Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/09005240", "title": "Eye-dominance-guided Foveated Rendering", "doi": null, "abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/ccem/2019/6334/0/633400a032", "title": "Low-Cost Eye Tracking for Foveated Rendering Using Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/ccem/2019/633400a032/1iHT2m32LIY", "parentPublication": { "id": "proceedings/ccem/2019/6334/0", "title": "2019 IEEE International Conference on Cloud Computing in Emerging Markets (CCEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090471", "title": "Efficient Peripheral Flicker Reduction for Foveated Rendering in Mobile VR Systems", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090471/1jIxm9DsWDS", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a001", "title": "Foveated Instant Radiosity", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2021/9101/0/09333964", "title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming", "doi": null, "abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc", "parentPublication": { "id": "proceedings/icoin/2021/9101/0", "title": "2021 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382903", "title": "A Log-Rectilinear Transformation for Foveated 360-degree Video Streaming", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382903/1saZxiH9uaQ", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523836", "title": "Foveated Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KaHFgmCCXK", "title": "2022 IEEE International Symposium on Multimedia (ISM)", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KaHLcFruNO", "doi": "10.1109/ISM55400.2022.00025", "title": "Semantic-Aware View Prediction for 360-Degree Videos at the 5G Edge", "normalizedTitle": "Semantic-Aware View Prediction for 360-Degree Videos at the 5G Edge", "abstract": "In a 5G testbed, we use 360&#x00B0; video streaming to test, measure, and demonstrate the 5G infrastructure, including the capabilities and challenges of edge computing support. Specifically, we use the SEAWARE (Semantic-Aware View Prediction) software system, originally described in [1], at the edge of the 5G network to support a 360&#x00B0; video player (handling tiled videos) by view prediction. Originally, SEAWARE performs semantic analysis of a 360&#x00B0; video on the media server, by extracting, e.g., important objects and events. This video semantic information is encoded in specific data structures and shared with the client in a DASH streaming framework. Making use of these data structures, the client/player can perform view prediction without in-depth, computationally expensive semantic video analysis. In this paper, the SEAWARE system was ported and adapted to run (partially) on the edge where it can be used to predict views and prefetch predicted segments/tiles in high quality in order to have them available close to the client when requested. The paper gives an overview of the 5G testbed, the overall architecture, and the implementation of SEAWARE at the edge server. Since an important goal of this work is to achieve low motion-to-glass latencies, we developed and describe \"tile postloading\", a technique that allows non-predicted tiles to be fetched in high quality into a segment already available in the player buffer. 
The performance of 360&#x00B0; tiled video playback on the 5G infrastructure is evaluated and presented. Current limitations of the 5G network in use and some challenges of DASH-based streaming and of edge-assisted viewport prediction under \"real-world\" constraints are pointed out; further, the performance benefits of tile postloading are disclosed.", "abstracts": [ { "abstractType": "Regular", "content": "In a 5G testbed, we use 360&#x00B0; video streaming to test, measure, and demonstrate the 5G infrastructure, including the capabilities and challenges of edge computing support. Specifically, we use the SEAWARE (Semantic-Aware View Prediction) software system, originally described in [1], at the edge of the 5G network to support a 360&#x00B0; video player (handling tiled videos) by view prediction. Originally, SEAWARE performs semantic analysis of a 360&#x00B0; video on the media server, by extracting, e.g., important objects and events. This video semantic information is encoded in specific data structures and shared with the client in a DASH streaming framework. Making use of these data structures, the client/player can perform view prediction without in-depth, computationally expensive semantic video analysis. In this paper, the SEAWARE system was ported and adapted to run (partially) on the edge where it can be used to predict views and prefetch predicted segments/tiles in high quality in order to have them available close to the client when requested. The paper gives an overview of the 5G testbed, the overall architecture, and the implementation of SEAWARE at the edge server. Since an important goal of this work is to achieve low motion-to-glass latencies, we developed and describe \"tile postloading\", a technique that allows non-predicted tiles to be fetched in high quality into a segment already available in the player buffer. The performance of 360&#x00B0; tiled video playback on the 5G infrastructure is evaluated and presented. 
Current limitations of the 5G network in use and some challenges of DASH-based streaming and of edge-assisted viewport prediction under \"real-world\" constraints are pointed out; further, the performance benefits of tile postloading are disclosed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In a 5G testbed, we use 360° video streaming to test, measure, and demonstrate the 5G infrastructure, including the capabilities and challenges of edge computing support. Specifically, we use the SEAWARE (Semantic-Aware View Prediction) software system, originally described in [1], at the edge of the 5G network to support a 360° video player (handling tiled videos) by view prediction. Originally, SEAWARE performs semantic analysis of a 360° video on the media server, by extracting, e.g., important objects and events. This video semantic information is encoded in specific data structures and shared with the client in a DASH streaming framework. Making use of these data structures, the client/player can perform view prediction without in-depth, computationally expensive semantic video analysis. In this paper, the SEAWARE system was ported and adapted to run (partially) on the edge where it can be used to predict views and prefetch predicted segments/tiles in high quality in order to have them available close to the client when requested. The paper gives an overview of the 5G testbed, the overall architecture, and the implementation of SEAWARE at the edge server. Since an important goal of this work is to achieve low motion-to-glass latencies, we developed and describe \"tile postloading\", a technique that allows non-predicted tiles to be fetched in high quality into a segment already available in the player buffer. The performance of 360° tiled video playback on the 5G infrastructure is evaluated and presented. 
Current limitations of the 5G network in use and some challenges of DASH-based streaming and of edge-assisted viewport prediction under \"real-world\" constraints are pointed out; further, the performance benefits of tile postloading are disclosed.", "fno": "717200a121", "keywords": [ "Client Server Systems", "Data Structures", "Image Segmentation", "Storage Management", "Video Cameras", "Video Signal Processing", "Video Streaming", "360 Degree Videos", "360 X 00 B 0 Tiled Video Playback", "360 X 00 B 0 Video Player", "Computationally Expensive Semantic Video Analysis", "DASH Streaming Framework", "Edge Server", "Edge Assisted Viewport Prediction", "Performs Semantic Analysis", "SEAWARE Software System", "SEAWARE System", "Semantic Aware View Prediction", "Specific Data Structures", "Tile Postloading", "Tiled Videos", "Video Semantic Information", "5 G Mobile Communication", "Motion Segmentation", "Prefetching", "Semantics", "Multimedia Computing", "Streaming Media", "Media", "Tile Based 360 X 00 B 0 Video Streaming", "Viewport Prediction", "Tile Postloading", "5 G Networks", "Edge Computing" ], "authors": [ { "affiliation": "University of Klagenfurt,Dept. of Information Technology,Klagenfurt", "fullName": "Shivi Vats", "givenName": "Shivi", "surname": "Vats", "__typename": "ArticleAuthorType" }, { "affiliation": "California Baptist University,Dept. of Electrical and Computer Engineering,Riverside,CA,USA", "fullName": "Jounsup Park", "givenName": "Jounsup", "surname": "Park", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Illinois,Dept. of Computer Science,Urbana-Champaign,IL,USA", "fullName": "Klara Nahrstedt", "givenName": "Klara", "surname": "Nahrstedt", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Massachusetts,Dept. 
of Electrical & Computer Engineering,Amherst,MA,USA", "fullName": "Michael Zink", "givenName": "Michael", "surname": "Zink", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Massachusetts,College of Information & Computer Sciences,Amherst,MA,USA", "fullName": "Ramesh Sitaraman", "givenName": "Ramesh", "surname": "Sitaraman", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Klagenfurt,Dept. of Information Technology,Klagenfurt", "fullName": "Hermann Hellwagner", "givenName": "Hermann", "surname": "Hellwagner", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "121-128", "year": "2022", "issn": null, "isbn": "978-1-6654-7172-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "717200a113", "articleId": "1KaHFLtYa4g", "__typename": "AdjacentArticleType" }, "next": { "fno": "717200a129", "articleId": "1KaHLLUg0Yo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2021/3734/0/373400a138", "title": "L3BOU: Low Latency, Low Bandwidth, Optimized Super-Resolution Backhaul for 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2021/373400a138/1A3j9j4t2Gk", "parentPublication": { "id": "proceedings/ism/2021/3734/0", "title": "2021 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09842378", "title": "Muster: Multi-source Streaming for Tile-based 360&#x00B0; Videos within Cloud Native 5G Networks", "doi": null, "abstractUrl": "/journal/tm/5555/01/09842378/1FlM107xCMw", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2022/6457/0/645700a609", "title": "InstaVarjoLive: An Edge-Assisted 360 Degree Video Live Streaming for Virtual Reality Testbed", "doi": null, "abstractUrl": "/proceedings-article/msn/2022/645700a609/1LUtLUWKy4g", "parentPublication": { "id": "proceedings/msn/2022/6457/0", "title": "2022 18th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wowmom/2020/7374/0/737400a191", "title": "A QoE and Visual Attention Evaluation on the Influence of Audio in 360&#x00B0; Videos", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2020/737400a191/1nMQCKTCoeY", "parentPublication": { "id": "proceedings/wowmom/2020/7374/0", "title": "2020 IEEE 21st International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloud/2020/8780/0/878000a337", "title": "Allies: Tile-Based Joint Transcoding, Delivery and Caching of 360&#x00B0; Videos in Edge Cloud Networks", "doi": null, "abstractUrl": "/proceedings-article/cloud/2020/878000a337/1pF6lo64jOo", "parentPublication": { "id": "proceedings/cloud/2020/8780/0", "title": "2020 IEEE 13th International Conference on Cloud Computing (CLOUD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a077", "title": "CooPEC: Cooperative Prefetching and Edge Caching for Adaptive 360&#x00B0; Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a077/1qBbI2Tm5os", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", 
"title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2020/9916/0/991600a291", "title": "MEC-Assisted FoV-Aware and QoE-Driven Adaptive 360&#x00B0; Video Streaming for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/msn/2020/991600a291/1sBO3kw7jnq", "parentPublication": { "id": "proceedings/msn/2020/9916/0", "title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382903", "title": "A Log-Rectilinear Transformation for Foveated 360-degree Video Streaming", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382903/1saZxiH9uaQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wowmom/2021/2263/0/226300a225", "title": "PhD Forum: Encrypted Traffic Analysis &#x0026; Content Awareness of 360-Degree Video Streaming Optimization", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2021/226300a225/1uZwVXJlFug", "parentPublication": { "id": "proceedings/wowmom/2021/2263/0", "title": "2021 IEEE 22nd International Symposium on a World of Wireless, Mobile and Multimedia Networks (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1LUtGPch8kM", "title": "2022 18th International Conference on Mobility, Sensing and Networking (MSN)", "acronym": "msn", "groupId": "10076543", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1LUtLUWKy4g", "doi": "10.1109/MSN57253.2022.00101", "title": "InstaVarjoLive: An Edge-Assisted 360 Degree Video Live Streaming for Virtual Reality Testbed", "normalizedTitle": "InstaVarjoLive: An Edge-Assisted 360 Degree Video Live Streaming for Virtual Reality Testbed", "abstract": "Virtual Reality (VR) challenges us with the requirements of ultra-low latency and ultra-high bandwidth. Existing methods that rely on cloud computing systems to improve the latency and bandwidth problems cannot satisfy the high computation and fast communication requirements in VR. Edge computing has emerged as a promising solution that can be applied in VR to optimize the latency and bandwidth prob-lems. However, another challenge is applying edge computing technology to improve the seamless for the VR users. Based on this, this paper proposes an edge-end collaboration testbed called Insta VarjoLive and conducts the experiments on the real-time 360&#x00B0; video live streaming seamless using VR headsets. We compared our experiments with the 360&#x00B0; videos watched by users from the cloud through VR headset and obtained the results showing that the edge-assisted 360&#x00B0; videos live streaming method has three major advantages: better real-time delivery, lower response time, and higher bandwidth guaranteed. Furthermore, we tested our experiments and discussed the other possible optimization methods in the future.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual Reality (VR) challenges us with the requirements of ultra-low latency and ultra-high bandwidth. 
Existing methods that rely on cloud computing systems to improve the latency and bandwidth problems cannot satisfy the high computation and fast communication requirements in VR. Edge computing has emerged as a promising solution that can be applied in VR to optimize the latency and bandwidth prob-lems. However, another challenge is applying edge computing technology to improve the seamless for the VR users. Based on this, this paper proposes an edge-end collaboration testbed called Insta VarjoLive and conducts the experiments on the real-time 360&#x00B0; video live streaming seamless using VR headsets. We compared our experiments with the 360&#x00B0; videos watched by users from the cloud through VR headset and obtained the results showing that the edge-assisted 360&#x00B0; videos live streaming method has three major advantages: better real-time delivery, lower response time, and higher bandwidth guaranteed. Furthermore, we tested our experiments and discussed the other possible optimization methods in the future.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Reality (VR) challenges us with the requirements of ultra-low latency and ultra-high bandwidth. Existing methods that rely on cloud computing systems to improve the latency and bandwidth problems cannot satisfy the high computation and fast communication requirements in VR. Edge computing has emerged as a promising solution that can be applied in VR to optimize the latency and bandwidth prob-lems. However, another challenge is applying edge computing technology to improve the seamless for the VR users. Based on this, this paper proposes an edge-end collaboration testbed called Insta VarjoLive and conducts the experiments on the real-time 360° video live streaming seamless using VR headsets. 
We compared our experiments with the 360° videos watched by users from the cloud through VR headset and obtained the results showing that the edge-assisted 360° videos live streaming method has three major advantages: better real-time delivery, lower response time, and higher bandwidth guaranteed. Furthermore, we tested our experiments and discussed the other possible optimization methods in the future.", "fno": "645700a609", "keywords": [ "Cloud Computing", "Edge Computing", "Video Streaming", "Virtual Reality", "Bandwidth Prob Lems", "Bandwidth Problems", "Edge Computing Technology", "Edge Assisted 360 Degree Video Live Streaming", "Edge Assisted 360 X 00 B 0 Videos", "Edge End Collaboration", "Fast Communication Requirements", "High Computation", "Possible Optimization Methods", "Real Time 360 X 00 B 0 Video", "Ultra High Bandwidth", "Ultra Low Latency", "Virtual Reality", "VR Headset", "VR Users", "Headphones", "Cloud Computing", "Optimization Methods", "Bandwidth", "Virtual Reality", "Streaming Media", "Real Time Systems", "Edge Computing", "Virtual Reality", "360 Degree Video Live Streaming", "Testbed Architecture" ], "authors": [ { "affiliation": "School of Information Technology, Deakin University,Waurn Ponds,Australia", "fullName": "Pengyu Li", "givenName": "Pengyu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Information Technology, Deakin University,Waurn Ponds,Australia", "fullName": "Feifei Chen", "givenName": "Feifei", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Computational Modelling DATA61 CSIRO,Melbourne,Australia", "fullName": "Rui Wang", "givenName": "Rui", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Information Technology, Deakin University,Waurn Ponds,Australia", "fullName": "Thuong Hoang", "givenName": "Thuong", "surname": "Hoang", "__typename": "ArticleAuthorType" }, { "affiliation": "Centre for Cyber Security Research and Innovation, 
School of IT, Deakin University,Waurn Ponds,Australia", "fullName": "Lei Pan", "givenName": "Lei", "surname": "Pan", "__typename": "ArticleAuthorType" } ], "idPrefix": "msn", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "609-613", "year": "2022", "issn": null, "isbn": "978-1-6654-6457-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "645700a601", "articleId": "1LUtUqQ6TZe", "__typename": "AdjacentArticleType" }, "next": { "fno": "645700a614", "articleId": "1LUtQvakwZq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2018/6857/0/685700a081", "title": "Efficient Live and on-Demand Tiled HEVC 360 VR Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2018/685700a081/17D45WKWnJC", "parentPublication": { "id": "proceedings/ism/2018/6857/0", "title": "2018 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2018/4195/0/08551577", "title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360&#x00B0; Video Network Multicast", "doi": null, "abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db", "parentPublication": { "id": "proceedings/icmew/2018/4195/0", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2018/6857/0/685700a044", "title": "Edge-Assisted Rendering of 360° Videos Streamed to Head-Mounted Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ism/2018/685700a044/17D45WrVfZo", "parentPublication": { "id": "proceedings/ism/2018/6857/0", "title": "2018 IEEE International Symposium on Multimedia 
(ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2021/3734/0/373400a138", "title": "L3BOU: Low Latency, Low Bandwidth, Optimized Super-Resolution Backhaul for 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2021/373400a138/1A3j9j4t2Gk", "parentPublication": { "id": "proceedings/ism/2021/3734/0", "title": "2021 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a600", "title": "Ebublio: Edge Assisted Multi-user 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a600/1CJdFxYlvfG", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859963", "title": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360&#x00B0; Live Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859963/1G9EwWVBvuo", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ucc/2020/2394/0/239400a402", "title": "A 360&#x00B0; Video Adaptive Streaming Scheme Based on Multiple Video Qualities", "doi": null, "abstractUrl": "/proceedings-article/ucc/2020/239400a402/1pZ0ZIjk5vq", "parentPublication": { "id": "proceedings/ucc/2020/2394/0", "title": "2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2020/9916/0/991600a291", "title": "MEC-Assisted FoV-Aware 
and QoE-Driven Adaptive 360&#x00B0; Video Streaming for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/msn/2020/991600a291/1sBO3kw7jnq", "parentPublication": { "id": "proceedings/msn/2020/9916/0", "title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382903", "title": "A Log-Rectilinear Transformation for Foveated 360-degree Video Streaming", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382903/1saZxiH9uaQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wowmom/2021/2263/0/226300a225", "title": "PhD Forum: Encrypted Traffic Analysis &#x0026; Content Awareness of 360-Degree Video Streaming Optimization", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2021/226300a225/1uZwVXJlFug", "parentPublication": { "id": "proceedings/wowmom/2021/2263/0", "title": "2021 IEEE 22nd International Symposium on a World of Wireless, Mobile and Multimedia Networks (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qBbG37ozSg", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qBbHaCz3vG", "doi": "10.1109/ISM.2020.00021", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "normalizedTitle": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "abstract": "Virtual reality applications create an immersive experience using 360&#x00B0; video with high resolution and frame rate. However, since the user only views a portion of 360&#x00B0; video according to his/her current viewport, streaming the whole content with high resolution causes bandwidth wastage. To address this issue, viewport-dependent approaches have been proposed such that only the part of the video which falls within user's current viewport is transmitted in high quality while the rest of the content is transmitted in lower quality. The selection of high- and low-quality parts is constantly adapted according to the user's head motion, which requires frequent intra coded frames at switching points, leading to an increment in the overall streaming bitrate. In this paper a viewport-adaptive streaming scheme is introduced, which avoids intra frames at switching points by introducing long intra period for non-changing parts of the content during head motion. This scheme has been realized taking advantage of mixed Video Coding Layer (VCL) Network Abstraction Layer (NAL) unit feature of Versatile Video Coding (VVC) standard. This method reduces bitrate significantly, especially for the sequences with either no or only slow camera motion, which is common for 360&#x00B0; video capturing.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality applications create an immersive experience using 360&#x00B0; video with high resolution and frame rate. 
However, since the user only views a portion of 360&#x00B0; video according to his/her current viewport, streaming the whole content with high resolution causes bandwidth wastage. To address this issue, viewport-dependent approaches have been proposed such that only the part of the video which falls within user's current viewport is transmitted in high quality while the rest of the content is transmitted in lower quality. The selection of high- and low-quality parts is constantly adapted according to the user's head motion, which requires frequent intra coded frames at switching points, leading to an increment in the overall streaming bitrate. In this paper a viewport-adaptive streaming scheme is introduced, which avoids intra frames at switching points by introducing long intra period for non-changing parts of the content during head motion. This scheme has been realized taking advantage of mixed Video Coding Layer (VCL) Network Abstraction Layer (NAL) unit feature of Versatile Video Coding (VVC) standard. This method reduces bitrate significantly, especially for the sequences with either no or only slow camera motion, which is common for 360&#x00B0; video capturing.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality applications create an immersive experience using 360° video with high resolution and frame rate. However, since the user only views a portion of 360° video according to his/her current viewport, streaming the whole content with high resolution causes bandwidth wastage. To address this issue, viewport-dependent approaches have been proposed such that only the part of the video which falls within user's current viewport is transmitted in high quality while the rest of the content is transmitted in lower quality. 
The selection of high- and low-quality parts is constantly adapted according to the user's head motion, which requires frequent intra coded frames at switching points, leading to an increment in the overall streaming bitrate. In this paper a viewport-adaptive streaming scheme is introduced, which avoids intra frames at switching points by introducing long intra period for non-changing parts of the content during head motion. This scheme has been realized taking advantage of mixed Video Coding Layer (VCL) Network Abstraction Layer (NAL) unit feature of Versatile Video Coding (VVC) standard. This method reduces bitrate significantly, especially for the sequences with either no or only slow camera motion, which is common for 360° video capturing.", "fno": "869700a085", "keywords": [ "Image Resolution", "Video Coding", "Video Streaming", "Virtual Reality", "360 X 00 B 0 Video Capturing", "VVC", "Virtual Reality Applications", "Frame Rate", "Viewport Dependent Approaches", "Head Motion", "Switching Points", "Streaming Bitrate", "Viewport Adaptive Streaming Scheme", "Intra Frames", "Versatile Video Coding Standard", "Mixed Video Coding Layer Network Abstraction Layer Unit Feature", "Subpicture Based Viewport Dependent 360 Degree Video Streaming", "Camera Motion", "High Resolution", "Switches", "Streaming Media", "Decoding", "Bit Rate", "Standards", "Spatial Resolution", "Rate Distortion", "Virtual Reality", "Versatile Video Coding" ], "authors": [ { "affiliation": "Nokia Technologies,Tampere,Finland", "fullName": "Maryam Homayouni", "givenName": "Maryam", "surname": "Homayouni", "__typename": "ArticleAuthorType" }, { "affiliation": "Nokia Technologies,Tampere,Finland", "fullName": "Alireza Aminlou", "givenName": "Alireza", "surname": "Aminlou", "__typename": "ArticleAuthorType" }, { "affiliation": "Nokia Technologies,Tampere,Finland", "fullName": "Miska M. 
Hannuksela", "givenName": "Miska M.", "surname": "Hannuksela", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "85-90", "year": "2020", "issn": null, "isbn": "978-1-7281-8697-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "869700a082", "articleId": "1qBbIEON8UU", "__typename": "AdjacentArticleType" }, "next": { "fno": "869700a091", "articleId": "1qBbInlTrGM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2017/2937/0/2937a038", "title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2016/4571/0/4571a583", "title": "Viewport-Adaptive Encoding and Streaming of 360-Degree Video for Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ism/2016/4571a583/12OmNzsJ7Ig", "parentPublication": { "id": "proceedings/ism/2016/4571/0", "title": "2016 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2018/4195/0/08551577", "title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360&#x00B0; Video Network Multicast", "doi": null, "abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db", "parentPublication": { "id": "proceedings/icmew/2018/4195/0", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/lcn/2018/4413/0/08638092", "title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos", "doi": null, "abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i", "parentPublication": { "id": "proceedings/lcn/2018/4413/0", "title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2021/0668/0/066800a462", "title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction", "doi": null, "abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva", "parentPublication": { "id": "proceedings/msn/2021/0668/0", "title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859789", "title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0169", "title": "Viewport Proposal CNN for 360&#x00B0; Video Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/04/09212608", "title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360&#x00B0; Video Quality", "doi": null, "abstractUrl": 
"/journal/tp/2022/04/09212608/1nG8VYgj7Ik", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/07/09261971", "title": "Online Bitrate Selection for Viewport Adaptive 360-Degree Video Streaming", "doi": null, "abstractUrl": "/journal/tm/2022/07/09261971/1oPzPzmWa9W", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", "title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uZwURNxBsY", "title": "2021 IEEE 22nd International Symposium on a World of Wireless, Mobile and Multimedia Networks (WoWMoM)", "acronym": "wowmom", "groupId": "1001379", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uZwVXJlFug", "doi": "10.1109/WoWMoM51794.2021.00038", "title": "PhD Forum: Encrypted Traffic Analysis &#x0026; Content Awareness of 360-Degree Video Streaming Optimization", "normalizedTitle": "PhD Forum: Encrypted Traffic Analysis & Content Awareness of 360-Degree Video Streaming Optimization", "abstract": "360&#x00B0;/VR videos are getting popular. However, these videos demand high bandwidth and processing power at the end devices. Though, viewport (VP) aware streaming can reduce the amount of data transmitted by streaming a limited portion of the frame covering the current user viewport, popular content providers still transfer the entire panoramic frame which demands more bandwidth. Also, these mechanisms, which partition the frames into a fixed number of tiles can not provide a finer boundary to cover the user VP, causing high pixel redundancy. To address these issues, first, we propose an offline and near-realtime 360&#x00B0; vs normal video classification tool which is further extended to analyse 360&#x00B0; video streaming distribution in the wild. Secondly, we propose a content aware 360&#x00B0; video partitioning tool leveraging a computational geometric approach. Our initial results show the feasibility of both proposals.", "abstracts": [ { "abstractType": "Regular", "content": "360&#x00B0;/VR videos are getting popular. However, these videos demand high bandwidth and processing power at the end devices. 
Though, viewport (VP) aware streaming can reduce the amount of data transmitted by streaming a limited portion of the frame covering the current user viewport, popular content providers still transfer the entire panoramic frame which demands more bandwidth. Also, these mechanisms, which partition the frames into a fixed number of tiles can not provide a finer boundary to cover the user VP, causing high pixel redundancy. To address these issues, first, we propose an offline and near-realtime 360&#x00B0; vs normal video classification tool which is further extended to analyse 360&#x00B0; video streaming distribution in the wild. Secondly, we propose a content aware 360&#x00B0; video partitioning tool leveraging a computational geometric approach. Our initial results show the feasibility of both proposals.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "360°/VR videos are getting popular. However, these videos demand high bandwidth and processing power at the end devices. Though, viewport (VP) aware streaming can reduce the amount of data transmitted by streaming a limited portion of the frame covering the current user viewport, popular content providers still transfer the entire panoramic frame which demands more bandwidth. Also, these mechanisms, which partition the frames into a fixed number of tiles can not provide a finer boundary to cover the user VP, causing high pixel redundancy. To address these issues, first, we propose an offline and near-realtime 360° vs normal video classification tool which is further extended to analyse 360° video streaming distribution in the wild. Secondly, we propose a content aware 360° video partitioning tool leveraging a computational geometric approach. 
Our initial results show the feasibility of both proposals.", "fno": "226300a225", "keywords": [ "Cryptography", "Internet", "Optimisation", "Telecommunication Traffic", "Video Cameras", "Video Signal Processing", "Video Streaming", "Traffic Analysis", "Computational Geometric Approach", "Content Providers", "Content Aware 360 X 00 B 0 Video Partitioning Tool", "360 X 00 B 0 Video Streaming Distribution", "Normal Video Classification Tool", "High Pixel Redundancy", "Entire Panoramic Frame", "User Viewport", "Viewport Aware Streaming", "Processing Power", "360 Degree Video Streaming Optimization", "Wireless Communication", "Redundancy", "Bandwidth", "Streaming Media", "Tools", "Prediction Algorithms", "Partitioning Algorithms", "360 X 00 B 0 Video Streaming", "Encrypted Traffic Classification", "360 X 00 B 0 Video Frame Tiling" ], "authors": [ { "affiliation": "The University of Sydney,Australia", "fullName": "Chamara Kattadige", "givenName": "Chamara", "surname": "Kattadige", "__typename": "ArticleAuthorType" } ], "idPrefix": "wowmom", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "225-226", "year": "2021", "issn": null, "isbn": "978-1-6654-2263-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "226300a223", "articleId": "1uZwVRLul0c", "__typename": "AdjacentArticleType" }, "next": { "fno": "226300a227", "articleId": "1uZwXt2yhva", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/percom/2022/1643/0/09762386", "title": "OpCASH: Optimized Utilization of MEC Cache for 360-Degree Video Streaming with Dynamic Tiling", "doi": null, "abstractUrl": "/proceedings-article/percom/2022/09762386/1CUOhFx88Ni", "parentPublication": { "id": "proceedings/percom/2022/1643/0", "title": "2022 IEEE International 
Conference on Pervasive Computing and Communications (PerCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2021/0668/0/066800a462", "title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction", "doi": null, "abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva", "parentPublication": { "id": "proceedings/msn/2021/0668/0", "title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wowmom/2022/0876/0/087600a281", "title": "Head Movement-aware MPEG-DASH SRD-based 360&#x00B0; Video VR Streaming System over Wireless Network", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2022/087600a281/1FHqcfLbws0", "parentPublication": { "id": "proceedings/wowmom/2022/0876/0", "title": "2022 IEEE 23rd International Symposium on a World of Wireless, Mobile and Multimedia Networks (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859789", "title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859963", "title": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360&#x00B0; Live Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859963/1G9EwWVBvuo", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ism/2019/5606/0/560600a200", "title": "Encoding Configurations for Tile-Based 360&#x00B0; Video", "doi": null, "abstractUrl": "/proceedings-article/ism/2019/560600a200/1gFJebKape8", "parentPublication": { "id": "proceedings/ism/2019/5606/0", "title": "2019 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0169", "title": "Viewport Proposal CNN for 360&#x00B0; Video Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/04/09212608", "title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360&#x00B0; Video Quality", "doi": null, "abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", "title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE 
International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwB2dUd", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNAm4THA", "doi": "10.1109/3DUI.2016.7460039", "title": "SWIFTER: Design and evaluation of a speech-based text input metaphor for immersive virtual environments", "normalizedTitle": "SWIFTER: Design and evaluation of a speech-based text input metaphor for immersive virtual environments", "abstract": "Text input is an important part of the data annotation process, where text is used to capture ideas and comments. For text entry in immersive virtual environments, for which standard keyboards usually do not work, various approaches have been proposed. While these solutions have mostly proven effective, there still remain certain shortcomings making further investigations worthwhile. Motivated by recent research, we propose the speech-based multimodal text entry system SWIFTER, which strives for simplicity while maintaining good performance. In an initial user study, we compared our approach to smartphone-based text entry within a CAVE-like virtual environment. Results indicate that SWIFTER reaches an average input rate of 23.6 words per minute and is positively received by users in terms of user experience.", "abstracts": [ { "abstractType": "Regular", "content": "Text input is an important part of the data annotation process, where text is used to capture ideas and comments. For text entry in immersive virtual environments, for which standard keyboards usually do not work, various approaches have been proposed. While these solutions have mostly proven effective, there still remain certain shortcomings making further investigations worthwhile. Motivated by recent research, we propose the speech-based multimodal text entry system SWIFTER, which strives for simplicity while maintaining good performance. 
In an initial user study, we compared our approach to smartphone-based text entry within a CAVE-like virtual environment. Results indicate that SWIFTER reaches an average input rate of 23.6 words per minute and is positively received by users in terms of user experience.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Text input is an important part of the data annotation process, where text is used to capture ideas and comments. For text entry in immersive virtual environments, for which standard keyboards usually do not work, various approaches have been proposed. While these solutions have mostly proven effective, there still remain certain shortcomings making further investigations worthwhile. Motivated by recent research, we propose the speech-based multimodal text entry system SWIFTER, which strives for simplicity while maintaining good performance. In an initial user study, we compared our approach to smartphone-based text entry within a CAVE-like virtual environment. Results indicate that SWIFTER reaches an average input rate of 23.6 words per minute and is positively received by users in terms of user experience.", "fno": "07460039", "keywords": [ "Speech Recognition", "Keyboards", "Speech", "User Interfaces", "Virtual Environments", "Context", "Microphones", "H 5 2 Information Interfaces And Presentation User Interfaces Graphical User Interfaces", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality" ], "authors": [ { "affiliation": "Visual Computing Institute, RWTH Aachen University, Germany", "fullName": "Sebastian Pick", "givenName": "Sebastian", "surname": "Pick", "__typename": "ArticleAuthorType" }, { "affiliation": "Visual Computing Institute, RWTH Aachen University, Germany", "fullName": "Andrew S. Puika", "givenName": "Andrew S.", "surname": "Puika", "__typename": "ArticleAuthorType" }, { "affiliation": "Visual Computing Institute, RWTH Aachen University, Germany", "fullName": "Torsten W. 
Kuhlen", "givenName": "Torsten W.", "surname": "Kuhlen", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "109-112", "year": "2016", "issn": null, "isbn": "978-1-5090-0842-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07460038", "articleId": "12OmNxTVU2T", "__typename": "AdjacentArticleType" }, "next": { "fno": "07460040", "articleId": "12OmNBqdr48", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cbms/2017/1710/0/1710a787", "title": "Analyzing the Impact of Cognitive Load in Evaluating Gaze-Based Typing", "doi": null, "abstractUrl": "/proceedings-article/cbms/2017/1710a787/12OmNx1IwaL", "parentPublication": { "id": "proceedings/cbms/2017/1710/0", "title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcsw/2003/1921/0/19210280", "title": "Exploring Edge-Based Input Techniques for Handheld Text Entry", "doi": null, "abstractUrl": "/proceedings-article/icdcsw/2003/19210280/12OmNywfKL1", "parentPublication": { "id": "proceedings/icdcsw/2003/1921/0", "title": "23rd International Conference on Distributed Computing Systems Workshops, 2003. 
Proceedings.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446059", "title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a694", "title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049665", "title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049665/1KYooqYQbF6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049695", "title": "CrowbarLimbs: A Fatigue-Reducing Virtual Reality Text Entry Metaphor", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049695/1KYowtn3pok", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a140", "title": "Direct Interaction Word-Gesture Text Input in Virtual Reality", "doi": null, 
"abstractUrl": "/proceedings-article/aivr/2022/572500a140/1KmF8k8WXi8", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797740", "title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797754", "title": "A Capacitive-sensing Physical Keyboard for VR Text Entry", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a387", "title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNySXF3W", "title": "2015 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "acronym": "vlhcc", "groupId": "1001007", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNB0Fxid", "doi": "10.1109/VLHCC.2015.7357217", "title": "A syntax-directed keyboard extension for writing source code on touchscreen devices", "normalizedTitle": "A syntax-directed keyboard extension for writing source code on touchscreen devices", "abstract": "As touchscreen mobile devices grow in popularity, it is inevitable that software developers will eventually want to write code on them. However, writing code on a soft (or virtual) keyboard is cumbersome due to the device size and lack of tactile feedback. We present a soft syntax-directed keyboard extension to the QWERTY keyboard for Java program input on touchscreen devices and evaluate this keyboard with Java programmers. Our results indicate that a programmer using the keyboard extension can input a Java program with fewer errors and using fewer keystrokes per character than when using a standard soft keyboard alone. In addition, programmers maintain an overall typing speed in words per minute that is equivalent to that on the standard soft keyboard alone. The keyboard extension was shown to be mentally, physically, and temporally less demanding than the standard soft keyboard alone when inputting a Java program.", "abstracts": [ { "abstractType": "Regular", "content": "As touchscreen mobile devices grow in popularity, it is inevitable that software developers will eventually want to write code on them. However, writing code on a soft (or virtual) keyboard is cumbersome due to the device size and lack of tactile feedback. We present a soft syntax-directed keyboard extension to the QWERTY keyboard for Java program input on touchscreen devices and evaluate this keyboard with Java programmers. 
Our results indicate that a programmer using the keyboard extension can input a Java program with fewer errors and using fewer keystrokes per character than when using a standard soft keyboard alone. In addition, programmers maintain an overall typing speed in words per minute that is equivalent to that on the standard soft keyboard alone. The keyboard extension was shown to be mentally, physically, and temporally less demanding than the standard soft keyboard alone when inputting a Java program.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As touchscreen mobile devices grow in popularity, it is inevitable that software developers will eventually want to write code on them. However, writing code on a soft (or virtual) keyboard is cumbersome due to the device size and lack of tactile feedback. We present a soft syntax-directed keyboard extension to the QWERTY keyboard for Java program input on touchscreen devices and evaluate this keyboard with Java programmers. Our results indicate that a programmer using the keyboard extension can input a Java program with fewer errors and using fewer keystrokes per character than when using a standard soft keyboard alone. In addition, programmers maintain an overall typing speed in words per minute that is equivalent to that on the standard soft keyboard alone. 
The keyboard extension was shown to be mentally, physically, and temporally less demanding than the standard soft keyboard alone when inputting a Java program.", "fno": "07357217", "keywords": [ "Arrays", "Containers", "Keyboards", "Tablet Computers", "Standards" ], "authors": [ { "affiliation": "Oregon State University, USA", "fullName": "Islam Almusaly", "givenName": "Islam", "surname": "Almusaly", "__typename": "ArticleAuthorType" }, { "affiliation": "Oregon State University, USA", "fullName": "Ronald Metoyer", "givenName": "Ronald", "surname": "Metoyer", "__typename": "ArticleAuthorType" } ], "idPrefix": "vlhcc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-10-01T00:00:00", "pubType": "proceedings", "pages": "195-202", "year": "2015", "issn": null, "isbn": "978-1-4673-7457-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07357216", "articleId": "12OmNyywxDe", "__typename": "AdjacentArticleType" }, "next": { "fno": "07357218", "articleId": "12OmNxVV5ZD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iit/2015/8509/0/07381520", "title": "Touch interface and keylogging malware", "doi": null, "abstractUrl": "/proceedings-article/iit/2015/07381520/12OmNBeRtOn", "parentPublication": { "id": "proceedings/iit/2015/8509/0", "title": "2015 11th International Conference on Innovations in Information Technology (IIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2017/2628/0/2628a161", "title": "FPGA-based Matrix Keyboard Common IP Core Design and the Implementation Using Verilog HDL", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2017/2628a161/12OmNwDj1aW", "parentPublication": { "id": "proceedings/icmcce/2017/2628/0", "title": "2017 Second International Conference on Mechanical, 
Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2018/4353/0/435301a144", "title": "EyeTell: Video-Assisted Touchscreen Keystroke Inference from Eye Movements", "doi": null, "abstractUrl": "/proceedings-article/sp/2018/435301a144/12OmNzC5SIa", "parentPublication": { "id": "proceedings/sp/2018/4353/0", "title": "2018 IEEE Symposium on Security and Privacy (SP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2015/7334/0/7334a142", "title": "Musikin&#x00E9;sia -- An Educational Adventure Game for Keyboard Learning", "doi": null, "abstractUrl": "/proceedings-article/icalt/2015/7334a142/12OmNzTYBTJ", "parentPublication": { "id": "proceedings/icalt/2015/7334/0", "title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2018/4235/0/08506501", "title": "The design and evaluation of a gestural keyboard for entering programming code on mobile devices", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2018/08506501/17D45VUZMWc", "parentPublication": { "id": "proceedings/vlhcc/2018/4235/0", "title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2017/0443/0/08103480", "title": "Syntax-directed keyboard extension: Evolution and evaluation", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2017/08103480/17D45WB0qby", "parentPublication": { "id": "proceedings/vlhcc/2017/0443/0", "title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2018/4235/0/08506557", "title": "Evaluation of A Visual 
Programming Keyboard on Touchscreen Devices", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2018/08506557/17D45We0UER", "parentPublication": { "id": "proceedings/vlhcc/2018/4235/0", "title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a702", "title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlsid/2019/0409/0/040900a539", "title": "Continuous Transparent Mobile Device Touchscreen Soft Keyboard Biometric Authentication", "doi": null, "abstractUrl": "/proceedings-article/vlsid/2019/040900a539/1a3wT0UUlG0", "parentPublication": { "id": "proceedings/vlsid/2019/0409/0", "title": "2019 32nd International Conference on VLSI Design and 2019 18th International Conference on Embedded Systems (VLSID)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798238", "title": "Text Typing in VR Using Smartphones Touchscreen and HMD", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798238/1cJ0Qw94bi8", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKipK", "title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "acronym": "vlhcc", "groupId": "1001007", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "17D45WB0qby", "doi": "10.1109/VLHCC.2017.8103480", "title": "Syntax-directed keyboard extension: Evolution and evaluation", "normalizedTitle": "Syntax-directed keyboard extension: Evolution and evaluation", "abstract": "The syntax-directed keyboard extension presented by Almusaly et al. in 2015 allows programmers to input Java source code with fewer errors and keystrokes compared to the soft QWERTY keyboard and it supports a comparable typing speed. While these results were obtained after only 10 minutes of practice, it is unclear how long term use affects performance. In this paper, we present an updated design for the original syntax-directed keyboard extension, replicate the original results, and evaluate the evolved design with Java programmers over eight sessions in a period of two weeks. Our results indicate that a programmer using the new keyboard extension for two weeks can input Java programs 16.5% faster (words per minute) than an expert QWERTY keyboard typist. In addition, we demonstrate that the efficiency and accuracy for inputting Java source code improves with repeated use over time and that perceived mental, physical, and temporal demands of the keyboard extension decrease over time.", "abstracts": [ { "abstractType": "Regular", "content": "The syntax-directed keyboard extension presented by Almusaly et al. in 2015 allows programmers to input Java source code with fewer errors and keystrokes compared to the soft QWERTY keyboard and it supports a comparable typing speed. While these results were obtained after only 10 minutes of practice, it is unclear how long term use affects performance. 
In this paper, we present an updated design for the original syntax-directed keyboard extension, replicate the original results, and evaluate the evolved design with Java programmers over eight sessions in a period of two weeks. Our results indicate that a programmer using the new keyboard extension for two weeks can input Java programs 16.5% faster (words per minute) than an expert QWERTY keyboard typist. In addition, we demonstrate that the efficiency and accuracy for inputting Java source code improves with repeated use over time and that perceived mental, physical, and temporal demands of the keyboard extension decrease over time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The syntax-directed keyboard extension presented by Almusaly et al. in 2015 allows programmers to input Java source code with fewer errors and keystrokes compared to the soft QWERTY keyboard and it supports a comparable typing speed. While these results were obtained after only 10 minutes of practice, it is unclear how long term use affects performance. In this paper, we present an updated design for the original syntax-directed keyboard extension, replicate the original results, and evaluate the evolved design with Java programmers over eight sessions in a period of two weeks. Our results indicate that a programmer using the new keyboard extension for two weeks can input Java programs 16.5% faster (words per minute) than an expert QWERTY keyboard typist. 
In addition, we demonstrate that the efficiency and accuracy for inputting Java source code improves with repeated use over time and that perceived mental, physical, and temporal demands of the keyboard extension decrease over time.", "fno": "08103480", "keywords": [ "Keyboards", "Java", "Atmospheric Measurements", "Particle Measurements", "Mobile Handsets", "Performance Evaluation" ], "authors": [ { "affiliation": "Oregon State University", "fullName": "Islam Almusaly", "givenName": "Islam", "surname": "Almusaly", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Notre Dame", "fullName": "Ronald Metoyer", "givenName": "Ronald", "surname": "Metoyer", "__typename": "ArticleAuthorType" }, { "affiliation": "Oregon State University", "fullName": "Carlos Jensen", "givenName": "Carlos", "surname": "Jensen", "__typename": "ArticleAuthorType" } ], "idPrefix": "vlhcc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "285-289", "year": "2017", "issn": "1943-6106", "isbn": "978-1-5386-0443-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08103479", "articleId": "17D45XDIXT3", "__typename": "AdjacentArticleType" }, "next": { "fno": "08103481", "articleId": "17D45XDIXRA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vlhcc/2015/7457/0/07357217", "title": "A syntax-directed keyboard extension for writing source code on touchscreen devices", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2015/07357217/12OmNB0Fxid", "parentPublication": { "id": "proceedings/vlhcc/2015/7457/0", "title": "2015 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ictai/2016/4459/0/4459a165", "title": "Modifying Keyboard Layout to Reduce Finger-Travel Distance", "doi": null, "abstractUrl": "/proceedings-article/ictai/2016/4459a165/12OmNvwC5up", "parentPublication": { "id": "proceedings/ictai/2016/4459/0", "title": "2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2018/4235/0/08506501", "title": "The design and evaluation of a gestural keyboard for entering programming code on mobile devices", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2018/08506501/17D45VUZMWc", "parentPublication": { "id": "proceedings/vlhcc/2018/4235/0", "title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0/751800a792", "title": "C-SAK: Chinese Scanning Ambiguous Keyboard for Parkinson's Disease Patients", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2018/751800a792/17D45XoXP46", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0", "title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2018/7308/0/08644590", "title": "Oinput: A Bone-Conductive QWERTY Keyboard Recognition for Wearable Device", "doi": null, "abstractUrl": "/proceedings-article/icpads/2018/08644590/17QjJd2W6Lq", "parentPublication": { "id": "proceedings/icpads/2018/7308/0", "title": "2018 IEEE 24th International Conference on Parallel and Distributed Systems 
(ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09737726", "title": "MyoKey: Inertial Motion Sensing and Gesture-based QWERTY Keyboard for Extended Realities", "doi": null, "abstractUrl": "/journal/tm/5555/01/09737726/1BQlEBR0ceY", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a676", "title": "AiRType: An Air-tapping Keyboard for Augmented Reality Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a676/1CJfr9wrq1i", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcs/2022/7177/0/717700b122", "title": "Implication of Animation on Android Security", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2022/717700b122/1HriKU4GqxG", "parentPublication": { "id": "proceedings/icdcs/2022/7177/0", "title": "2022 IEEE 42nd International Conference on Distributed Computing Systems (ICDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/10/09347701", "title": "A One-Page Text Entry Method Optimized for Rectangle Smartwatches", "doi": null, "abstractUrl": "/journal/tm/2022/10/09347701/1qWImH2cLGU", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2021/0898/0/089800a435", "title": "A Deep Genetic Method for Keyboard Layout Optimization", "doi": null, "abstractUrl": "/proceedings-article/ictai/2021/089800a435/1zw6eAsr9Ru", "parentPublication": { "id": "proceedings/ictai/2021/0898/0", "title": "2021 
IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwcUjWs", "title": "1990 Thirty-Fifth IEEE Computer Society International Conference on Intellectual Leverage", "acronym": "cmpcon", "groupId": "1000109", "volume": "0", "displayVolume": "0", "year": "1990", "__typename": "ProceedingType" }, "article": { "id": "12OmNzxgHHb", "doi": "10.1109/CMPCON.1990.63749", "title": "Modern virtual screen technology and applications", "normalizedTitle": "Modern virtual screen technology and applications", "abstract": "Virtual screens are information displays in which the apparent image is much larger than the physical device. The image also seems to the user to be located at a distance, even though the display monitor is a small device, worn near the eye on a headset. A recently introduced virtual screen, called Private Eye, appears to overcome the inherent limitation of CRT technology. The Private Eye is light, low power, low voltage, small, and low cost. The Private Eye is intended primarily for commercial use, and the features of this type of display offer considerable benefits for certain classes of personal electronic devices. The display is housed in a rectangular package approximately 1.2*1.3*3.5 in. It can be held to the eye or mounted on a headband. When the display is mounted on a headset, the user's vision is not extensively occluded, and the display is perceived as floating in space a few feet in front of the viewer.<>", "abstracts": [ { "abstractType": "Regular", "content": "Virtual screens are information displays in which the apparent image is much larger than the physical device. The image also seems to the user to be located at a distance, even though the display monitor is a small device, worn near the eye on a headset. A recently introduced virtual screen, called Private Eye, appears to overcome the inherent limitation of CRT technology. The Private Eye is light, low power, low voltage, small, and low cost. 
The Private Eye is intended primarily for commercial use, and the features of this type of display offer considerable benefits for certain classes of personal electronic devices. The display is housed in a rectangular package approximately 1.2*1.3*3.5 in. It can be held to the eye or mounted on a headband. When the display is mounted on a headset, the user's vision is not extensively occluded, and the display is perceived as floating in space a few feet in front of the viewer.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual screens are information displays in which the apparent image is much larger than the physical device. The image also seems to the user to be located at a distance, even though the display monitor is a small device, worn near the eye on a headset. A recently introduced virtual screen, called Private Eye, appears to overcome the inherent limitation of CRT technology. The Private Eye is light, low power, low voltage, small, and low cost. The Private Eye is intended primarily for commercial use, and the features of this type of display offer considerable benefits for certain classes of personal electronic devices. The display is housed in a rectangular package approximately 1.2*1.3*3.5 in. It can be held to the eye or mounted on a headband. When the display is mounted on a headset, the user's vision is not extensively occluded, and the display is perceived as floating in space a few feet in front of the viewer.", "fno": "00063749", "keywords": [ "Display Devices", "Interactive Terminals", "Virtual Screen Technology", "Information Displays", "Display Monitor", "Virtual Screen", "Private Eye", "1 2 Inch", "1 3 Inch", "3 5 Inch", "Cathode Ray Tubes", "Large Screen Displays", "Computer Displays", "Aerospace Electronics", "Foot", "Pixel", "Computerized Monitoring", "Modems", "Optical Reflection", "Costs" ], "authors": [ { "affiliation": "Reflection Technol. Inc., Waltham, MA, USA", "fullName": "A. 
Becker", "givenName": "A.", "surname": "Becker", "__typename": "ArticleAuthorType" } ], "idPrefix": "cmpcon", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1990-01-01T00:00:00", "pubType": "proceedings", "pages": "612,613,614,615", "year": "1990", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00063748", "articleId": "12OmNCeK2hq", "__typename": "AdjacentArticleType" }, "next": { "fno": "00063750", "articleId": "12OmNzEmFGk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/nbis/2009/3767/0/3767a612", "title": "Study on Realistic Communication Technology with Tiled Displays Wall", "doi": null, "abstractUrl": "/proceedings-article/nbis/2009/3767a612/12OmNqBKUf2", "parentPublication": { "id": "proceedings/nbis/2009/3767/0", "title": "2009 International Conference on Network-Based Information Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480764", "title": "An Evaluation of Immersive Displays for Virtual Human Experiences", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480764/12OmNqFrGvP", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926684", "title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926684/12OmNvlxJrb", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2007/0905/0/04161055", "title": "Effects of Interaction-Display Offset on User Performance in Surround Screen Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161055/12OmNxymo84", "parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sose/2013/4944/0/4944a509", "title": "iScreen: A Merged Screen of Local System with Remote Applications in a Mobile Cloud Environment", "doi": null, "abstractUrl": "/proceedings-article/sose/2013/4944a509/12OmNyVes2r", "parentPublication": { "id": "proceedings/sose/2013/4944/0", "title": "2013 IEEE Seventh International Symposium on Service-Oriented System Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031c411", "title": "The Research of LED Display Screen Based on Embedded Technology and FPGA Technology", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031c411/12OmNzw8iWF", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/1978/11/01646754", "title": "An Overview of Dircted Beam Graphics Display Hardware", "doi": null, "abstractUrl": "/magazine/co/1978/11/01646754/13rRUyp7tZu", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a691", "title": "MoPeDT: A Modular Head-Mounted Display Toolkit to Conduct Peripheral Vision Research", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a691/1MNgl22Q3XG", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User 
Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797925", "title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797925/1cJ0J09XMdy", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09383112", "title": "Beaming Displays", "doi": null, "abstractUrl": "/journal/tg/2021/05/09383112/1saZzKxYSqI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJbEwHHqEg", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJbKYSq2Vq", "doi": "10.1109/VR51125.2022.00062", "title": "You&#x2019;re in for a Bumpy Ride! Uneven Terrain Increases Cybersickness While Navigating with Head Mounted Displays", "normalizedTitle": "You’re in for a Bumpy Ride! Uneven Terrain Increases Cybersickness While Navigating with Head Mounted Displays", "abstract": "Cybersickness (i.e., visually induced motion sickness) serves as a significant obstacle to the usage and broader adoption of virtual reality (VR) technologies. This collection of symptoms akin to motion sickness can be impacted by different characteristics of a virtual experience, such as visual realism and optical flow. However, relatively little is known regarding how cybersickness is influenced by traversing uneven virtual terrain. In this study, we aim to better understand the impacts of different virtual terrain types on cybersickness in VR. We recruited 38 participants to navigate a virtual forest environment with three terrain variants: flat surface, terrain with regular bumps, and irregular terrain generated from Perlin noise. We collected cybersickness data using the Fast Motion Sickness Scale (FMSS) and Simulator Sickness Questionnaire (SSQ) in addition to galvanic skin response data. Our results indicate that users felt greater levels of cybersickness in the presence of regular bumps and irregular terrain than they did when traversing flat geometry. 
We recommend that designers exercise caution when incorporating uneven terrain into their virtual experiences, and maintain awareness of the risks carried by these design decisions.", "abstracts": [ { "abstractType": "Regular", "content": "Cybersickness (i.e., visually induced motion sickness) serves as a significant obstacle to the usage and broader adoption of virtual reality (VR) technologies. This collection of symptoms akin to motion sickness can be impacted by different characteristics of a virtual experience, such as visual realism and optical flow. However, relatively little is known regarding how cybersickness is influenced by traversing uneven virtual terrain. In this study, we aim to better understand the impacts of different virtual terrain types on cybersickness in VR. We recruited 38 participants to navigate a virtual forest environment with three terrain variants: flat surface, terrain with regular bumps, and irregular terrain generated from Perlin noise. We collected cybersickness data using the Fast Motion Sickness Scale (FMSS) and Simulator Sickness Questionnaire (SSQ) in addition to galvanic skin response data. Our results indicate that users felt greater levels of cybersickness in the presence of regular bumps and irregular terrain than they did when traversing flat geometry. We recommend that designers exercise caution when incorporating uneven terrain into their virtual experiences, and maintain awareness of the risks carried by these design decisions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Cybersickness (i.e., visually induced motion sickness) serves as a significant obstacle to the usage and broader adoption of virtual reality (VR) technologies. This collection of symptoms akin to motion sickness can be impacted by different characteristics of a virtual experience, such as visual realism and optical flow. 
However, relatively little is known regarding how cybersickness is influenced by traversing uneven virtual terrain. In this study, we aim to better understand the impacts of different virtual terrain types on cybersickness in VR. We recruited 38 participants to navigate a virtual forest environment with three terrain variants: flat surface, terrain with regular bumps, and irregular terrain generated from Perlin noise. We collected cybersickness data using the Fast Motion Sickness Scale (FMSS) and Simulator Sickness Questionnaire (SSQ) in addition to galvanic skin response data. Our results indicate that users felt greater levels of cybersickness in the presence of regular bumps and irregular terrain than they did when traversing flat geometry. We recommend that designers exercise caution when incorporating uneven terrain into their virtual experiences, and maintain awareness of the risks carried by these design decisions.", "fno": "961700a428", "keywords": [ "Helmet Mounted Displays", "Human Factors", "Skin", "Virtual Reality", "Different Virtual Terrain Types", "VR", "Virtual Forest Environment", "Terrain Variants", "Regular Bumps", "Irregular Terrain", "Cybersickness Data", "Fast Motion Sickness Scale", "Simulator Sickness Questionnaire", "Traversing Flat Geometry", "Virtual Experience", "Bumpy Ride", "Uneven Terrain Increases Cybersickness", "Head Mounted Displays", "Significant Obstacle", "Broader Adoption", "Virtual Reality Technologies", "Visual Realism", "Uneven Virtual Terrain", "Visualization", "Three Dimensional Displays", "Cybersickness", "Navigation", "Multimedia Systems", "Virtual Environments", "User Interfaces", "H 5 1 INFORMATION INTERFACES AND PRESENTATION E G HCI Multimedia Information Systems", "Artificial Augmented And Virtual Realities" ], "authors": [ { "affiliation": "The University of Texas at San Antonio", "fullName": "Samuel Ang", "givenName": "Samuel", "surname": "Ang", "__typename": "ArticleAuthorType" }, { "affiliation": "The University 
of Texas at San Antonio", "fullName": "John Quarles", "givenName": "John", "surname": "Quarles", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "428-435", "year": "2022", "issn": null, "isbn": "978-1-6654-9617-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "961700a419", "articleId": "1CJc6iY0lI4", "__typename": "AdjacentArticleType" }, "next": { "fno": "961700a436", "articleId": "1CJbVhCZuqA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2009/3789/0/3789a486", "title": "Estimating Cybersickness of Simulated Motion Using the Simulator Sickness Questionnaire (SSQ): A Controlled Study", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a486/12OmNAOKnYL", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446192", "title": "Using Cybersickness Indicators to Adapt Navigation in Virtual Reality: A Pre-Study", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446192/13bd1eSlyt4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446206", "title": "A Preliminary Investigation of the Effects of Discrete Virtual Rotation on Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446206/13bd1fKQxqW", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE 
Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699319", "title": "Effect of Navigation Speed and VR Devices on Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699319/19F1OrW6KxW", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a528", "title": "Human Factors Related to Cybersickness Tolerance in Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a528/1CJcDQEpCqA", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a794", "title": "An Investigation on the Relationship between Cybersickness and Heart Rate Variability When Navigating a Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a794/1J7We4du3FC", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a307", "title": "Demographic and Behavioral Correlates of Cybersickness: A Large Lab-in-the-Field Study of 837 Participants", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a307/1JrRjge0g6I", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented 
Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a658", "title": "Like a Rolling Stone: Effects of Space Deformation During Linear Acceleration on Slope Perception and Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a658/1MNgN5chdRu", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/massw/2019/4121/0/412100a072", "title": "Cognitive Distraction to Improve Cybersickness in Virtual Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/massw/2019/412100a072/1iTvAIC4RJ6", "parentPublication": { "id": "proceedings/massw/2019/4121/0", "title": "2019 IEEE 16th International Conference on Mobile Ad Hoc and Sensor Systems Workshops (MASSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089513", "title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7We4du3FC", "doi": "10.1109/ISMAR-Adjunct57072.2022.00169", "title": "An Investigation on the Relationship between Cybersickness and Heart Rate Variability When Navigating a Virtual Environment", "normalizedTitle": "An Investigation on the Relationship between Cybersickness and Heart Rate Variability When Navigating a Virtual Environment", "abstract": "In this study, we explore the physiological effect of cybersickness using heart rate variability (HRV) and examine the relationship between the physiological and subjective measurement of cybersickness when navigating a virtual environment (VE). To achieve these objectives, we conducted two experiments. In experiment 1, four types of forwarding translation method (teleportation &#x002B; embodied control [EC], steering &#x002B; EC, teleportation &#x002B; instrumental control [IC], and steering &#x002B; IC) were compared, while six types of rotation (yaw, pitch, roll, yaw &#x002B; pitch, yaw &#x002B; roll, and pitch &#x002B; roll) were examined in experiment 2. The HRV analysis for experiment 1 revealed that the steering conditions yielded significantly higher low-frequency (LF) power (ms<sup>2</sup>) than did the teleportation condition. In addition, the simulator sickness questionnaire (SSQ) results showed that the steering conditions produced stronger symptoms of cybersickness than did the teleportation conditions. For experiment 2, the yaw &#x002B; roll and pitch &#x002B; roll conditions had greater LF power (ms<sup>2</sup>) than did the other four conditions. 
However, the SSQ indicated that the level of cybersickness was higher for the roll, yaw &#x002B; roll, and pitch &#x002B; roll conditions than for the other rotation conditions. The participants completed the four and six conditions over a limited period across two experimental days. However, the overall HRV and SSQ results for both experiments indicated no explicit additive effects of sickness symptoms. Based on these outcomes, this study verified the relationship between cybersickness, HRV, and SSQ during navigation in a VE.", "abstracts": [ { "abstractType": "Regular", "content": "In this study, we explore the physiological effect of cybersickness using heart rate variability (HRV) and examine the relationship between the physiological and subjective measurement of cybersickness when navigating a virtual environment (VE). To achieve these objectives, we conducted two experiments. In experiment 1, four types of forwarding translation method (teleportation &#x002B; embodied control [EC], steering &#x002B; EC, teleportation &#x002B; instrumental control [IC], and steering &#x002B; IC) were compared, while six types of rotation (yaw, pitch, roll, yaw &#x002B; pitch, yaw &#x002B; roll, and pitch &#x002B; roll) were examined in experiment 2. The HRV analysis for experiment 1 revealed that the steering conditions yielded significantly higher low-frequency (LF) power (ms<sup>2</sup>) than did the teleportation condition. In addition, the simulator sickness questionnaire (SSQ) results showed that the steering conditions produced stronger symptoms of cybersickness than did the teleportation conditions. For experiment 2, the yaw &#x002B; roll and pitch &#x002B; roll conditions had greater LF power (ms<sup>2</sup>) than did the other four conditions. However, the SSQ indicated that the level of cybersickness was higher for the roll, yaw &#x002B; roll, and pitch &#x002B; roll conditions than for the other rotation conditions. 
The participants completed the four and six conditions over a limited period across two experimental days. However, the overall HRV and SSQ results for both experiments indicated no explicit additive effects of sickness symptoms. Based on these outcomes, this study verified the relationship between cybersickness, HRV, and SSQ during navigation in a VE.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this study, we explore the physiological effect of cybersickness using heart rate variability (HRV) and examine the relationship between the physiological and subjective measurement of cybersickness when navigating a virtual environment (VE). To achieve these objectives, we conducted two experiments. In experiment 1, four types of forwarding translation method (teleportation + embodied control [EC], steering + EC, teleportation + instrumental control [IC], and steering + IC) were compared, while six types of rotation (yaw, pitch, roll, yaw + pitch, yaw + roll, and pitch + roll) were examined in experiment 2. The HRV analysis for experiment 1 revealed that the steering conditions yielded significantly higher low-frequency (LF) power (ms2) than did the teleportation condition. In addition, the simulator sickness questionnaire (SSQ) results showed that the steering conditions produced stronger symptoms of cybersickness than did the teleportation conditions. For experiment 2, the yaw + roll and pitch + roll conditions had greater LF power (ms2) than did the other four conditions. However, the SSQ indicated that the level of cybersickness was higher for the roll, yaw + roll, and pitch + roll conditions than for the other rotation conditions. The participants completed the four and six conditions over a limited period across two experimental days. However, the overall HRV and SSQ results for both experiments indicated no explicit additive effects of sickness symptoms. 
Based on these outcomes, this study verified the relationship between cybersickness, HRV, and SSQ during navigation in a VE.", "fno": "536500a794", "keywords": [ "Cardiology", "Electrocardiography", "Ergonomics", "Human Factors", "Medical Signal Processing", "Psychology", "Virtual Reality", "Cybersickness", "Forwarding Translation Method", "Heart Rate Variability", "HRV Analysis", "Low Frequency Power", "Navigating", "Navigation", "Physiological Effect", "Physiological Measurement", "Pitch Roll Conditions", "Rotation Conditions", "Simulator Sickness Questionnaire Results", "SSQ", "Steering IC", "Steering Conditions", "Subjective Measurement", "Teleportation Instrumental Control IC", "Teleportation Condition", "Virtual Environment", "Yaw Pitch", "Yaw Roll", "Integrated Circuits", "Graphics", "Solid Modeling", "Cybersickness", "Navigation", "Instruments", "Virtual Environments", "Cybersickness", "Heart Rate Variability", "Virtual Reality", "H 1 2 Models And Principles User Machine Systems Human Factors", "I 3 7 Computing Graphics Three Dimensional Graphics And Realism Virtual Reality" ], "authors": [ { "affiliation": "Institute for Cognitive Science", "fullName": "Aelee Kim", "givenName": "Aelee", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Seoul National University,Interdisciplinary Study of Cognitive Science,Rep. of Korea", "fullName": "Jeong-Eun Lee", "givenName": "Jeong-Eun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Seoul National University,Interdisciplinary Study of Cognitive Science,Rep. 
of Korea", "fullName": "Kyoung-Min Lee", "givenName": "Kyoung-Min", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "794-797", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a788", "articleId": "1J7Wrgz23FC", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a798", "articleId": "1J7W9mGSh9u", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2009/3789/0/3789a486", "title": "Estimating Cybersickness of Simulated Motion Using the Simulator Sickness Questionnaire (SSQ): A Controlled Study", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a486/12OmNAOKnYL", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/1999/0093/0/00930237", "title": "Cybersickness: An Experimental Study to Isolate the Effects of Rotational Scene Oscillations", "doi": null, "abstractUrl": "/proceedings-article/vr/1999/00930237/12OmNvFHfIa", "parentPublication": { "id": "proceedings/vr/1999/0093/0", "title": "Proceedings of Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699319", "title": "Effect of Navigation Speed and VR Devices on Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699319/19F1OrW6KxW", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on 
Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09737429", "title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness", "doi": null, "abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a428", "title": "You&#x2019;re in for a Bumpy Ride! Uneven Terrain Increases Cybersickness While Navigating with Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a428/1CJbKYSq2Vq", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a528", "title": "Human Factors Related to Cybersickness Tolerance in Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a528/1CJcDQEpCqA", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a307", "title": "Demographic and Behavioral Correlates of Cybersickness: A Large Lab-in-the-Field Study of 837 Participants", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a307/1JrRjge0g6I", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09386008", "title": "Floor-vibration 
VR: Mitigating Cybersickness Using Whole-body Tactile Stimuli in Highly Realistic Vehicle Driving Experiences", "doi": null, "abstractUrl": "/journal/tg/2021/05/09386008/1seiz94oUco", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a373", "title": "Using Fuzzy Logic to Involve Individual Differences for Predicting Cybersickness during VR Navigation", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a373/1tuAPQPWR2g", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2022/06/09580681", "title": "Modeling and Defense of Social Virtual Reality Attacks Inducing Cybersickness", "doi": null, "abstractUrl": "/journal/tq/2022/06/09580681/1xPo5KfQN1K", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1JrQPhTSspy", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JrR1CsIUjC", "doi": "10.1109/ISMAR55827.2022.00096", "title": "TruVR: Trustworthy Cybersickness Detection using Explainable Machine Learning", "normalizedTitle": "TruVR: Trustworthy Cybersickness Detection using Explainable Machine Learning", "abstract": "Cybersickness can be characterized by nausea, vertigo, headache, eye strain, and other discomforts when using virtual reality (VR) systems. The previously reported machine learning (ML) and deep learning (DL) algorithms for detecting (classification) and predicting (regression) VR cybersickness use black-box models; thus, they lack explainability. Moreover, VR sensors generate a massive amount of data, resulting in complex and large models. Therefore, having inherent explainability in cybersickness detection models can significantly improve the model&#x2019;s trustworthiness and provide insight into why and how the ML/DL model amved at a specific decision. To address this issue, we present three explainable machine learning (xML) models to detect and predict cybersickness: 1) explainable boosting machine (EBM), 2) decision tree (DT), and 3) logistic regression (LR). We evaluate xML-based models with publicly available physiological and gameplay datasets for cybersickness. The results show that the EBM can detect cybersickness with an accuracy of 99.75&#x0025; and 94.10&#x0025; for the physiological and gameplay datasets, respectively. On the other hand, while predicting the cybersickness, EBM resulted in a Root Mean Square Error (RMSE) of 0.071 for the physiological dataset and 0.27 for the gameplay dataset. 
Furthermore, the EBM-based global explanation reveals exposure length, rotation, and acceleration as key features causing cybersickness in the gameplay dataset. In contrast, galvanic skin responses and heart rate are most significant in the physiological dataset. Our results also suggest that EBM-based local explanation can identify cybersickness-causing factors for individual samples. We believe the proposed xML-based cybersickness detection method can help future researchers understand, analyze, and design simpler cybersickness detection and reduction models.", "abstracts": [ { "abstractType": "Regular", "content": "Cybersickness can be characterized by nausea, vertigo, headache, eye strain, and other discomforts when using virtual reality (VR) systems. The previously reported machine learning (ML) and deep learning (DL) algorithms for detecting (classification) and predicting (regression) VR cybersickness use black-box models; thus, they lack explainability. Moreover, VR sensors generate a massive amount of data, resulting in complex and large models. Therefore, having inherent explainability in cybersickness detection models can significantly improve the model&#x2019;s trustworthiness and provide insight into why and how the ML/DL model amved at a specific decision. To address this issue, we present three explainable machine learning (xML) models to detect and predict cybersickness: 1) explainable boosting machine (EBM), 2) decision tree (DT), and 3) logistic regression (LR). We evaluate xML-based models with publicly available physiological and gameplay datasets for cybersickness. The results show that the EBM can detect cybersickness with an accuracy of 99.75&#x0025; and 94.10&#x0025; for the physiological and gameplay datasets, respectively. On the other hand, while predicting the cybersickness, EBM resulted in a Root Mean Square Error (RMSE) of 0.071 for the physiological dataset and 0.27 for the gameplay dataset. 
Furthermore, the EBM-based global explanation reveals exposure length, rotation, and acceleration as key features causing cybersickness in the gameplay dataset. In contrast, galvanic skin responses and heart rate are most significant in the physiological dataset. Our results also suggest that EBM-based local explanation can identify cybersickness-causing factors for individual samples. We believe the proposed xML-based cybersickness detection method can help future researchers understand, analyze, and design simpler cybersickness detection and reduction models.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Cybersickness can be characterized by nausea, vertigo, headache, eye strain, and other discomforts when using virtual reality (VR) systems. The previously reported machine learning (ML) and deep learning (DL) algorithms for detecting (classification) and predicting (regression) VR cybersickness use black-box models; thus, they lack explainability. Moreover, VR sensors generate a massive amount of data, resulting in complex and large models. Therefore, having inherent explainability in cybersickness detection models can significantly improve the model’s trustworthiness and provide insight into why and how the ML/DL model amved at a specific decision. To address this issue, we present three explainable machine learning (xML) models to detect and predict cybersickness: 1) explainable boosting machine (EBM), 2) decision tree (DT), and 3) logistic regression (LR). We evaluate xML-based models with publicly available physiological and gameplay datasets for cybersickness. The results show that the EBM can detect cybersickness with an accuracy of 99.75% and 94.10% for the physiological and gameplay datasets, respectively. On the other hand, while predicting the cybersickness, EBM resulted in a Root Mean Square Error (RMSE) of 0.071 for the physiological dataset and 0.27 for the gameplay dataset. 
Furthermore, the EBM-based global explanation reveals exposure length, rotation, and acceleration as key features causing cybersickness in the gameplay dataset. In contrast, galvanic skin responses and heart rate are most significant in the physiological dataset. Our results also suggest that EBM-based local explanation can identify cybersickness-causing factors for individual samples. We believe the proposed xML-based cybersickness detection method can help future researchers understand, analyze, and design simpler cybersickness detection and reduction models.", "fno": "532500a777", "keywords": [ "Decision Trees", "Deep Learning Artificial Intelligence", "Ergonomics", "Mean Square Error Methods", "Pattern Classification", "Regression Analysis", "Skin", "Virtual Reality", "XML", "Black Box Models", "Cybersickness Detection Models", "Cybersickness Causing Factors", "Design Simpler Cybersickness Detection", "EBM", "Explainable Machine Learning Models", "Gameplay Dataset", "Inherent Explainability", "Large Models", "Physiological Dataset", "Publicly Available Physiological Gameplay Datasets", "Reduction Models", "Trustworthy Cybersickness Detection", "Tru VR", "Virtual Reality Systems", "VR Cybersickness", "X ML Based Cybersickness Detection Method", "X ML Based Models", "Solid Modeling", "Cybersickness", "XML", "Predictive Models", "Sensor Phenomena And Characterization", "Physiology", "Skin", "Virtual Reality", "Cybersickness", "Explainable Machine Learning", "Cybersickness Detection", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality", "HCI Design And Evaluation Methods" ], "authors": [ { "affiliation": "University of Missouri-Columbia", "fullName": "Ripan Kumar Kundu", "givenName": "Ripan Kumar", "surname": "Kundu", "__typename": "ArticleAuthorType" }, { "affiliation": "Northeastern University", "fullName": "Rifatul Islam", "givenName": "Rifatul", "surname": "Islam", "__typename": "ArticleAuthorType" }, { 
"affiliation": "University of Missouri-Columbia", "fullName": "Prasad Calyam", "givenName": "Prasad", "surname": "Calyam", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Missouri-Columbia", "fullName": "Khaza Anuarul Hoque", "givenName": "Khaza Anuarul", "surname": "Hoque", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "777-786", "year": "2022", "issn": "1554-7868", "isbn": "978-1-6654-5325-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "532500a768", "articleId": "1JrQWVGWWg8", "__typename": "AdjacentArticleType" }, "next": { "fno": "532500a787", "articleId": "1JrRgFp6G2s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2023/4815/0/481500a561", "title": "You Make Me Sick&#x0021; The Effect of Stairs on Presence, Cybersickness, and Perception of Embodied Conversational Agents", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a561/1MNgq5zE1BS", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a609", "title": "LiteVR: Interpretable and Lightweight Cybersickness Detection using Explainable AI", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a609/1MNgzF7scM0", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090607", "title": "Exploring Blink-Rate Behaviors for Cybersickness 
Detection in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090607/1jIxAfKUpbO", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090495", "title": "Automatic Detection of Cybersickness from Physiological Signal in a Virtual Roller Coaster Simulation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090495/1jIximIpClq", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090494", "title": "A Deep Learning based Framework for Detecting and Reducing onset of Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090494/1jIxuKp865y", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a400", "title": "Automatic Detection and Prediction of Cybersickness Severity using Deep Neural Networks from user&#x2019;s Physiological Signals", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a400/1pyswQ0oYOQ", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09386008", "title": "Floor-vibration VR: Mitigating Cybersickness Using Whole-body Tactile Stimuli in Highly Realistic Vehicle Driving Experiences", "doi": null, 
"abstractUrl": "/journal/tg/2021/05/09386008/1seiz94oUco", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a148", "title": "CyberSense: A Closed-Loop Framework to Detect Cybersickness Severity and Adaptively apply Reduction Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a148/1tnWZDrIad2", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a486", "title": "Visual Techniques to Reduce Cybersickness in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a486/1tnXnofrJRu", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a031", "title": "Cybersickness Prediction from Integrated HMD&#x2019;s Sensors: A Multimodal Deep Fusion Approach using Eye-tracking and Head-tracking Data", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a031/1yeCV8NQEE0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pyswQ0oYOQ", "doi": "10.1109/ISMAR50242.2020.00066", "title": "Automatic Detection and Prediction of Cybersickness Severity using Deep Neural Networks from user&#x2019;s Physiological Signals", "normalizedTitle": "Automatic Detection and Prediction of Cybersickness Severity using Deep Neural Networks from user’s Physiological Signals", "abstract": "Cybersickness is one of the primary challenges to the usability and acceptability of virtual reality (VR). Cybersickness can cause motion sickness-like discomforts, including disorientation, headache, nausea, and fatigue, both during and after the VR immersion. Prior research suggested a significant correlation between physiological signals and cybersickness severity, as measured by the simulator sickness questionnaire (SSQ). However, SSQ may not be suitable for automatic detection of cybersickness severity during immersion, as it is usually reported before and after the immersion. In this study, we introduced an automated approach for the detection and prediction of cybersickness severity from the user's physiological signals. We collected heart rate, breathing rate, heart rate variability, and galvanic skin response data from 31 healthy participants while immersed in a VR roller coaster simulation. We found a significant difference in the participants' physiological signals during their cybersickness state compared to their resting baseline. We compared a support vector machine classifier and three deep neural classifiers for cybersickness severity detection and prediction in two minutes' future, given the previous two minutes of physiological signals. 
Our proposed simplified convolutional long short-term memory classifier achieved an accuracy of 97.44% for detecting current cybersickness severity and 87.38% for predicting future cybersickness severity from the physiological signals.", "abstracts": [ { "abstractType": "Regular", "content": "Cybersickness is one of the primary challenges to the usability and acceptability of virtual reality (VR). Cybersickness can cause motion sickness-like discomforts, including disorientation, headache, nausea, and fatigue, both during and after the VR immersion. Prior research suggested a significant correlation between physiological signals and cybersickness severity, as measured by the simulator sickness questionnaire (SSQ). However, SSQ may not be suitable for automatic detection of cybersickness severity during immersion, as it is usually reported before and after the immersion. In this study, we introduced an automated approach for the detection and prediction of cybersickness severity from the user's physiological signals. We collected heart rate, breathing rate, heart rate variability, and galvanic skin response data from 31 healthy participants while immersed in a VR roller coaster simulation. We found a significant difference in the participants' physiological signals during their cybersickness state compared to their resting baseline. We compared a support vector machine classifier and three deep neural classifiers for cybersickness severity detection and prediction in two minutes' future, given the previous two minutes of physiological signals. Our proposed simplified convolutional long short-term memory classifier achieved an accuracy of 97.44% for detecting current cybersickness severity and 87.38% for predicting future cybersickness severity from the physiological signals.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Cybersickness is one of the primary challenges to the usability and acceptability of virtual reality (VR). 
Cybersickness can cause motion sickness-like discomforts, including disorientation, headache, nausea, and fatigue, both during and after the VR immersion. Prior research suggested a significant correlation between physiological signals and cybersickness severity, as measured by the simulator sickness questionnaire (SSQ). However, SSQ may not be suitable for automatic detection of cybersickness severity during immersion, as it is usually reported before and after the immersion. In this study, we introduced an automated approach for the detection and prediction of cybersickness severity from the user's physiological signals. We collected heart rate, breathing rate, heart rate variability, and galvanic skin response data from 31 healthy participants while immersed in a VR roller coaster simulation. We found a significant difference in the participants' physiological signals during their cybersickness state compared to their resting baseline. We compared a support vector machine classifier and three deep neural classifiers for cybersickness severity detection and prediction in two minutes' future, given the previous two minutes of physiological signals. 
Our proposed simplified convolutional long short-term memory classifier achieved an accuracy of 97.44% for detecting current cybersickness severity and 87.38% for predicting future cybersickness severity from the physiological signals.", "fno": "850800a400", "keywords": [ "Cardiology", "Human Factors", "Medical Signal Processing", "Neural Nets", "Neurophysiology", "Pattern Classification", "Skin", "Support Vector Machines", "Virtual Reality", "Deep Neural Networks", "Physiological Signals", "VR Immersion", "Automatic Detection", "VR Roller Coaster Simulation", "Participants", "Cybersickness State", "Cybersickness Severity Detection", "Current Cybersickness Severity", "Future Cybersickness Severity", "SSQ", "Simulator Sickness Questionnaire", "Healthy Participants", "Support Vector Machines", "Solid Modeling", "Neural Networks", "Physiology", "Skin", "Usability", "Heart Rate Variability", "Virtual Reality", "Automated Cybersickness Detection", "Visually Induced Motion Sickness", "Deep Neural Network" ], "authors": [ { "affiliation": "The University of Texas,San Antonio", "fullName": "Rifatul Islam", "givenName": "Rifatul", "surname": "Islam", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas,San Antonio", "fullName": "Yonggun Lee", "givenName": "Yonggun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas,San Antonio", "fullName": "Mehrad Jaloli", "givenName": "Mehrad", "surname": "Jaloli", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas,San Antonio", "fullName": "Imtiaz Muhammad", "givenName": "Imtiaz", "surname": "Muhammad", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas,San Antonio", "fullName": "Dakai Zhu", "givenName": "Dakai", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas,San Antonio", "fullName": "Paul Rad", "givenName": "Paul", "surname": "Rad", "__typename": 
"ArticleAuthorType" }, { "affiliation": "The University of Texas,San Antonio", "fullName": "Yufei Huang", "givenName": "Yufei", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas,San Antonio", "fullName": "John Quarles", "givenName": "John", "surname": "Quarles", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "400-411", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "850800a387", "articleId": "1pysy0d2Nck", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a412", "articleId": "1pyswqrEtCE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2015/9953/0/07344692", "title": "PhysSigTK: Enabling engagement experiments with physiological signals for game design", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344692/12OmNBpVQ7Y", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciiw/2017/0680/0/08272612", "title": "Detection of universal cross-cultural depression indicators from the physiological signals of observers", "doi": null, "abstractUrl": "/proceedings-article/aciiw/2017/08272612/12OmNvwkulY", "parentPublication": { "id": "proceedings/aciiw/2017/0680/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/sip/2015/9855/0/9855a021", "title": "Actual Emotion and False Emotion Classification by Physiological Signal", "doi": null, "abstractUrl": "/proceedings-article/sip/2015/9855a021/12OmNwF0BUf", "parentPublication": { "id": "proceedings/sip/2015/9855/0", "title": "2015 8th International Conference on Signal Processing, Image Processing and Pattern Recognition (SIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/02/06821267", "title": "Neuroticism, Extraversion, Conscientiousness and Stress: Physiological Correlates", "doi": null, "abstractUrl": "/journal/ta/2015/02/06821267/13rRUwjoNvd", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciibms/2018/7516/3/08549968", "title": "Identifying Severity Level of Cybersickness from EEG signals using CN2 Rule Induction Algorithm", "doi": null, "abstractUrl": "/proceedings-article/iciibms/2018/08549968/17D45VsBU0I", "parentPublication": { "id": "proceedings/iciibms/2018/7516/3", "title": "2018 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a777", "title": "TruVR: Trustworthy Cybersickness Detection using Explainable Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a777/1JrR1CsIUjC", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090495", "title": "Automatic Detection of Cybersickness from Physiological Signal in a Virtual Roller Coaster Simulation", "doi": null, "abstractUrl": 
"/proceedings-article/vrw/2020/09090495/1jIximIpClq", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090494", "title": "A Deep Learning based Framework for Detecting and Reducing onset of Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090494/1jIxuKp865y", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a148", "title": "CyberSense: A Closed-Loop Framework to Detect Cybersickness Severity and Adaptively apply Reduction Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a148/1tnWZDrIad2", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a031", "title": "Cybersickness Prediction from Integrated HMD&#x2019;s Sensors: A Multimodal Deep Fusion Approach using Eye-tracking and Head-tracking Data", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a031/1yeCV8NQEE0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qpzz6dhLLq", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qpzC7sA2C4", "doi": "10.1109/AIVR50618.2020.00034", "title": "A Review of Electrostimulation-based Cybersickness Mitigations", "normalizedTitle": "A Review of Electrostimulation-based Cybersickness Mitigations", "abstract": "With the development of consumer virtual reality (VR), people have increasing opportunities to experience cybersickness (CS) -- a kind of visually induced motion sickness (MS). In view of the importance of CS mitigation (CSM), this paper reviews the methods of electrostimulation-based CSM (e-CSM), broadly categorised as either &#x201C;VR-centric&#x201D; or &#x201C;Human-centric&#x201D;. &#x201C;VR-centric&#x201D; refers to approaches where knowledge regarding the visual motion being experienced in VR directly affects how the neurostimulation is delivered, whereas &#x201C;Human-centric&#x201D; approaches focus on the inhibition or enhancement of human functions per se without knowledge of the experienced visual motion. 
We found that 1) most e-CSM approaches are based on visual-vestibular sensory conflict theory -- one of the generally-accepted aetiologies of MS, 2) the majority of eCSM approaches are vestibular system-centric, either stimulating it to compensate for the mismatched vestibular sensory responses, or inhibiting it to make an artificial and temporary dysfunction in vestibular sensory organs or cortical areas, 3) Vestibular sensory organbased solutions are able to mitigate CS with immediate effect, while the real-time effect of vestibular cortical areas-based methods remains unclear, due to limited public data, 4) Based on subjective assessment, VRcentric approaches could relieve all three kinds of symptoms (nausea, oculomotor, and disorientation), which appears superior to the human-centric ones that could only alleviate one of the symptom types or just have an overall relief effect. Finally, we propose promising future research directions in the development of e-CSM.", "abstracts": [ { "abstractType": "Regular", "content": "With the development of consumer virtual reality (VR), people have increasing opportunities to experience cybersickness (CS) -- a kind of visually induced motion sickness (MS). In view of the importance of CS mitigation (CSM), this paper reviews the methods of electrostimulation-based CSM (e-CSM), broadly categorised as either &#x201C;VR-centric&#x201D; or &#x201C;Human-centric&#x201D;. &#x201C;VR-centric&#x201D; refers to approaches where knowledge regarding the visual motion being experienced in VR directly affects how the neurostimulation is delivered, whereas &#x201C;Human-centric&#x201D; approaches focus on the inhibition or enhancement of human functions per se without knowledge of the experienced visual motion. 
We found that 1) most e-CSM approaches are based on visual-vestibular sensory conflict theory -- one of the generally-accepted aetiologies of MS, 2) the majority of eCSM approaches are vestibular system-centric, either stimulating it to compensate for the mismatched vestibular sensory responses, or inhibiting it to make an artificial and temporary dysfunction in vestibular sensory organs or cortical areas, 3) Vestibular sensory organbased solutions are able to mitigate CS with immediate effect, while the real-time effect of vestibular cortical areas-based methods remains unclear, due to limited public data, 4) Based on subjective assessment, VRcentric approaches could relieve all three kinds of symptoms (nausea, oculomotor, and disorientation), which appears superior to the human-centric ones that could only alleviate one of the symptom types or just have an overall relief effect. Finally, we propose promising future research directions in the development of e-CSM.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the development of consumer virtual reality (VR), people have increasing opportunities to experience cybersickness (CS) -- a kind of visually induced motion sickness (MS). In view of the importance of CS mitigation (CSM), this paper reviews the methods of electrostimulation-based CSM (e-CSM), broadly categorised as either “VR-centric” or “Human-centric”. “VR-centric” refers to approaches where knowledge regarding the visual motion being experienced in VR directly affects how the neurostimulation is delivered, whereas “Human-centric” approaches focus on the inhibition or enhancement of human functions per se without knowledge of the experienced visual motion. 
We found that 1) most e-CSM approaches are based on visual-vestibular sensory conflict theory -- one of the generally-accepted aetiologies of MS, 2) the majority of eCSM approaches are vestibular system-centric, either stimulating it to compensate for the mismatched vestibular sensory responses, or inhibiting it to make an artificial and temporary dysfunction in vestibular sensory organs or cortical areas, 3) Vestibular sensory organbased solutions are able to mitigate CS with immediate effect, while the real-time effect of vestibular cortical areas-based methods remains unclear, due to limited public data, 4) Based on subjective assessment, VRcentric approaches could relieve all three kinds of symptoms (nausea, oculomotor, and disorientation), which appears superior to the human-centric ones that could only alleviate one of the symptom types or just have an overall relief effect. Finally, we propose promising future research directions in the development of e-CSM.", "fno": "746300a151", "keywords": [ "Human Factors", "Virtual Reality", "Visually Induced Motion Sickness", "E CSM Approaches", "Artificial Dysfunction", "Temporary Dysfunction", "Vestibular Sensory Responses", "Human Centric Approaches", "Electrostimulation Based CSM", "Electrostimulation Based Cybersickness Mitigations", "Visual Vestibular Sensory Conflict Theory", "Visualization", "Electrodes", "Cybersickness", "Noise Measurement", "Magnetic Heads", "Neck", "Reliability Theory", "Virtual Reality", "Cybersickness", "Mitigation", "Galvanic Vestibular Stimulation", "Transcranial Direct Current Stimulation" ], "authors": [ { "affiliation": "University of Glasgow,School of Pyschology School of Computing Science,Glasgow,UK", "fullName": "Gang Li", "givenName": "Gang", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Glasgow,School of Computing Science,Glasgow,UK", "fullName": "Mark McGill", "givenName": "Mark", "surname": "McGill", "__typename": 
"ArticleAuthorType" }, { "affiliation": "University of Glasgow,School of Computing Science,Glasgow,UK", "fullName": "Stephen Brewster", "givenName": "Stephen", "surname": "Brewster", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Glasgow,School of Pyschology,Glasgow,UK", "fullName": "Frank Pollick", "givenName": "Frank", "surname": "Pollick", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "151-157", "year": "2020", "issn": null, "isbn": "978-1-7281-7463-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "746300a146", "articleId": "1qpzBPPsMHC", "__typename": "AdjacentArticleType" }, "next": { "fno": "746300a158", "articleId": "1qpzzMACVIA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446269", "title": "A Study of Cybersickness and Sensory Conflict Theory Using a Motion-Coupled Virtual Reality System", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446269/13bd1eTtWYf", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714040", "title": "Omnidirectional Galvanic Vestibular Stimulation in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714040/1B0Y04eka8E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09737429", "title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness", "doi": null, 
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2022/02/09779506", "title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset", "doi": null, "abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049731", "title": "Cybersickness, Cognition, &#x0026; Motor Skills: The Effects of Music, Gender, and Gaming Experience", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049731/1KYow8CUV20", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089551", "title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089551/1jIx95ncylO", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a123", "title": "Exploring the feasibility of mitigating VR-HMD-induced cybersickness using cathodal transcranial direct current stimulation", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a123/1qpzDMNZnKo", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a351", "title": "A Review of Deep Learning Approaches to EEG-Based Classification of Cybersickness in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a351/1qpzzTXUIgw", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09386008", "title": "Floor-vibration VR: Mitigating Cybersickness Using Whole-body Tactile Stimuli in Highly Realistic Vehicle Driving Experiences", "doi": null, "abstractUrl": "/journal/tg/2021/05/09386008/1seiz94oUco", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a156", "title": "A new device to restore sensory congruency in virtual reality and to prevent cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a156/1tnWwDLMCAw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1i5mkDyiIUg", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1i5mOYR5gre", "doi": "10.1109/ICCVW.2019.00455", "title": "EyeNet: A Multi-Task Deep Network for Off-Axis Eye Gaze Estimation", "normalizedTitle": "EyeNet: A Multi-Task Deep Network for Off-Axis Eye Gaze Estimation", "abstract": "Eye gaze estimation is a crucial component in Virtual and Mixed Reality. In head-mounted VR/MR devices the eyes are imaged off-axis to avoid blocking the user's gaze, this view-point makes drawing eye related inferences very challenging. In this work, we present EyeNet, the first single deep neural network which solves multiple heterogeneous tasks related to eye gaze estimation for an off-axis camera setting. The tasks include eye segmentation, IR LED glints detection, pupil and cornea center estimation. We benchmark all tasks on MagicEyes, a large and new dataset of 587 subjects with varying morphology, gender, skin-color, make-up and imaging conditions.", "abstracts": [ { "abstractType": "Regular", "content": "Eye gaze estimation is a crucial component in Virtual and Mixed Reality. In head-mounted VR/MR devices the eyes are imaged off-axis to avoid blocking the user's gaze, this view-point makes drawing eye related inferences very challenging. In this work, we present EyeNet, the first single deep neural network which solves multiple heterogeneous tasks related to eye gaze estimation for an off-axis camera setting. The tasks include eye segmentation, IR LED glints detection, pupil and cornea center estimation. 
We benchmark all tasks on MagicEyes, a large and new dataset of 587 subjects with varying morphology, gender, skin-color, make-up and imaging conditions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Eye gaze estimation is a crucial component in Virtual and Mixed Reality. In head-mounted VR/MR devices the eyes are imaged off-axis to avoid blocking the user's gaze, this view-point makes drawing eye related inferences very challenging. In this work, we present EyeNet, the first single deep neural network which solves multiple heterogeneous tasks related to eye gaze estimation for an off-axis camera setting. The tasks include eye segmentation, IR LED glints detection, pupil and cornea center estimation. We benchmark all tasks on MagicEyes, a large and new dataset of 587 subjects with varying morphology, gender, skin-color, make-up and imaging conditions.", "fno": "502300d683", "keywords": [ "Cameras", "Eye", "Gaze Tracking", "Neural Nets", "Eye Net", "Multitask Deep Network", "Off Axis Eye Gaze Estimation", "Eye Related Inferences", "Single Deep Neural Network", "Multiple Heterogeneous Tasks", "Off Axis Camera Setting", "Eye Segmentation", "Pupil", "Cornea Center Estimation", "IR LED Glints Detection", "Magic Eyes", "Cornea", "Estimation", "Three Dimensional Displays", "Task Analysis", "Two Dimensional Displays", "Cameras", "Light Emitting Diodes", "Eye Tracking", "Gaze Estimation", "Mixed Reality", "Computer Vision", "Deep Learning", "Multi Task Learning" ], "authors": [ { "affiliation": "Magic Leap Inc, USA", "fullName": "Zhengyang Wu", "givenName": "Zhengyang", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Magic Leap Inc, USA", "fullName": "Srivignesh Rajendran", "givenName": "Srivignesh", "surname": "Rajendran", "__typename": "ArticleAuthorType" }, { "affiliation": "Magic Leap Inc, USA", "fullName": "Tarrence Van As", "givenName": "Tarrence", "surname": "Van As", "__typename": "ArticleAuthorType" }, { "affiliation": 
"Magic Leap Inc, USA", "fullName": "Vijay Badrinarayanan", "givenName": "Vijay", "surname": "Badrinarayanan", "__typename": "ArticleAuthorType" }, { "affiliation": "Magic Leap Inc, USA", "fullName": "Andrew Rabinovich", "givenName": "Andrew", "surname": "Rabinovich", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "3683-3687", "year": "2019", "issn": null, "isbn": "978-1-7281-5023-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "502300d677", "articleId": "1i5mMEFZAbu", "__typename": "AdjacentArticleType" }, "next": { "fno": "502300d688", "articleId": "1i5msUQsh1u", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aina/2015/7905/0/7905a904", "title": "Implementation of an Eye Gaze Tracking System for the Disabled People", "doi": null, "abstractUrl": "/proceedings-article/aina/2015/7905a904/12OmNwEJ115", "parentPublication": { "id": "proceedings/aina/2015/7905/0", "title": "2015 IEEE 29th International Conference on Advanced Information Networking and Applications (AINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b003", "title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2017/6724/0/07926555", "title": "Gaze Tracking and Object Recognition from Eye Images", "doi": null, "abstractUrl": 
"/proceedings-article/irc/2017/07926555/12OmNzvz6Lc", "parentPublication": { "id": "proceedings/irc/2017/6724/0", "title": "2017 First IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2006/2686/0/04027065", "title": "Free head motion eye gaze tracking using a single camera and multiple light sources", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2006/04027065/146z4Q3rfLg", "parentPublication": { "id": "proceedings/sibgrapi/2006/2686/0", "title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545635", "title": "Gaze-Aided Eye Detection via Appearance Learning", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545635/17D45X7VTgp", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2019/1198/0/119800a449", "title": "A Novel Remote Eye Gaze Tracking System Using Line Illumination Sources", "doi": null, "abstractUrl": "/proceedings-article/mipr/2019/119800a449/19wB6HJrVmM", "parentPublication": { "id": "proceedings/mipr/2019/1198/0", "title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a370", "title": "Reconstructing 3D Virtual Face with Eye Gaze from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a370/1CJcaEUfrW0", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956312", "title": "A Joint Cascaded Framework for Simultaneous Eye State, Eye Center, and Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956312/1IHq8em8jug", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2023/4544/0/10042793", "title": "RavenGaze: A Dataset for Gaze Estimation Leveraging Psychological Experiment Through Eye Tracker", "doi": null, "abstractUrl": "/proceedings-article/fg/2023/10042793/1KOv3GSAGS4", "parentPublication": { "id": "proceedings/fg/2023/4544/0", "title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iisa/2020/2346/0/09284374", "title": "Eye Gaze Analysis of Students in Educational Systems", "doi": null, "abstractUrl": "/proceedings-article/iisa/2020/09284374/1pttO1dX1xC", "parentPublication": { "id": "proceedings/iisa/2020/2346/0", "title": "2020 11th International Conference on Information, Intelligence, Systems and Applications (IISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqzcvOD", "title": "2016 IEEE/ACM Symposium on Edge Computing (SEC)", "acronym": "sec", "groupId": "1816984", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNqBKTTL", "doi": "10.1109/SEC.2016.8", "title": "Low-Cost Video Transcoding at the Wireless Edge", "normalizedTitle": "Low-Cost Video Transcoding at the Wireless Edge", "abstract": "With the proliferation of hand-held devices in recent years, wireless video streaming has become an extremely popular application. Internet video streaming to mobile devices, however, faces several challenges, such as unstable connections, long latency, and high jitter. Bitrate adaptive streaming and video transcoding are widely used to overcome such problems. However, there are still several shortcomings of these approaches. Bitrate adaptive streaming cannot provide fine-grained adaptation. Moreover, video transcoding is expensive and is hard to apply for live streaming. In this work, we propose to use a low-cost video transcoding solution running at the wireless edge, such as home WiFi Access Points (APs). Instead of having expensive server-based transcoding, we designed and implemented a real-time video transcoding solution on a low-cost hardware, Raspberry Pi. By running our transcoding solution at the wireless edge, it can provide more agile adaptation to sudden network dynamics and is able to incorporate clients' feedback quickly. Our transcoding solution is transparent, low-cost, and scalable. Thus it allows broad and quick deployment in home WiFi APs (and also in cellular base stations). The evaluations reveal that our system enhances the performance of video streaming compared with other bitrate adaptive solutions. 
It provides higher video bitrates (at least 2.1×) without causing video stall or rebuffering.", "abstracts": [ { "abstractType": "Regular", "content": "With the proliferation of hand-held devices in recent years, wireless video streaming has become an extremely popular application. Internet video streaming to mobile devices, however, faces several challenges, such as unstable connections, long latency, and high jitter. Bitrate adaptive streaming and video transcoding are widely used to overcome such problems. However, there are still several shortcomings of these approaches. Bitrate adaptive streaming cannot provide fine-grained adaptation. Moreover, video transcoding is expensive and is hard to apply for live streaming. In this work, we propose to use a low-cost video transcoding solution running at the wireless edge, such as home WiFi Access Points (APs). Instead of having expensive server-based transcoding, we designed and implemented a real-time video transcoding solution on a low-cost hardware, Raspberry Pi. By running our transcoding solution at the wireless edge, it can provide more agile adaptation to sudden network dynamics and is able to incorporate clients' feedback quickly. Our transcoding solution is transparent, low-cost, and scalable. Thus it allows broad and quick deployment in home WiFi APs (and also in cellular base stations). The evaluations reveal that our system enhances the performance of video streaming compared with other bitrate adaptive solutions. It provides higher video bitrates (at least 2.1×) without causing video stall or rebuffering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the proliferation of hand-held devices in recent years, wireless video streaming has become an extremely popular application. Internet video streaming to mobile devices, however, faces several challenges, such as unstable connections, long latency, and high jitter. 
Bitrate adaptive streaming and video transcoding are widely used to overcome such problems. However, there are still several shortcomings of these approaches. Bitrate adaptive streaming cannot provide fine-grained adaptation. Moreover, video transcoding is expensive and is hard to apply for live streaming. In this work, we propose to use a low-cost video transcoding solution running at the wireless edge, such as home WiFi Access Points (APs). Instead of having expensive server-based transcoding, we designed and implemented a real-time video transcoding solution on a low-cost hardware, Raspberry Pi. By running our transcoding solution at the wireless edge, it can provide more agile adaptation to sudden network dynamics and is able to incorporate clients' feedback quickly. Our transcoding solution is transparent, low-cost, and scalable. Thus it allows broad and quick deployment in home WiFi APs (and also in cellular base stations). The evaluations reveal that our system enhances the performance of video streaming compared with other bitrate adaptive solutions. 
It provides higher video bitrates (at least 2.1×) without causing video stall or rebuffering.", "fno": "3322a129", "keywords": [ "Streaming Media", "Transcoding", "Bit Rate", "Wireless Communication", "Media", "Servers", "Bandwidth", "Wireless Edge", "Adaptive Bitrate", "Video Transcoding" ], "authors": [ { "affiliation": null, "fullName": "Jongwon Yoon", "givenName": "Jongwon", "surname": "Yoon", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Peng Liu", "givenName": "Peng", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Suman Banerjee", "givenName": "Suman", "surname": "Banerjee", "__typename": "ArticleAuthorType" } ], "idPrefix": "sec", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-10-01T00:00:00", "pubType": "proceedings", "pages": "129-141", "year": "2016", "issn": null, "isbn": "978-1-5090-3322-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3322a127", "articleId": "12OmNwEJ0QK", "__typename": "AdjacentArticleType" }, "next": { "fno": "3322a142", "articleId": "12OmNrJRP7U", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/lcn/2015/6770/0/07366317", "title": "Dependency-aware distributed video transcoding in the cloud", "doi": null, "abstractUrl": "/proceedings-article/lcn/2015/07366317/12OmNBPc8yU", "parentPublication": { "id": "proceedings/lcn/2015/6770/0", "title": "2015 IEEE 40th Conference on Local Computer Networks (LCN 2015)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2012/2027/0/06266411", "title": "Video Description Length Guided Constant Quality Video Coding with Bitrate Constraint", "doi": null, "abstractUrl": "/proceedings-article/icmew/2012/06266411/12OmNvA1hcQ", "parentPublication": { "id": 
"proceedings/icmew/2012/2027/0", "title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486461", "title": "Fast HEVC to SCC Transcoding Based on Decision Trees", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486461/14jQfQj2fTq", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2020/03/08640235", "title": "Hardware-Assisted, Low-Cost Video Transcoding Solution in Wireless Networks", "doi": null, "abstractUrl": "/journal/tm/2020/03/08640235/17D45Xh13sp", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2019/04/08466657", "title": "Performance Analysis and Modeling of Video Transcoding Using Heterogeneous Cloud Services", "doi": null, "abstractUrl": "/journal/td/2019/04/08466657/18l6Oxr9hF6", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/5555/01/10075517", "title": "Cost-Effective, Quality-Oriented Transcoding of Live-Streamed Video on Edge-Servers", "doi": null, "abstractUrl": "/journal/sc/5555/01/10075517/1LAuAtT9b1e", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/10089145", "title": "Adaptive Video Streaming in Multi-Tier Computing Networks: Joint Edge Transcoding and Client Enhancement", "doi": null, "abstractUrl": "/journal/tm/5555/01/10089145/1LW4pbZ8Q36", 
"parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipccc/2018/6808/0/08711214", "title": "Stride: Distributed Video Transcoding in Spark", "doi": null, "abstractUrl": "/proceedings-article/ipccc/2018/08711214/1axfFj9xALC", "parentPublication": { "id": "proceedings/ipccc/2018/6808/0", "title": "2018 IEEE 37th International Performance Computing and Communications Conference (IPCCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2020/9325/0/09232616", "title": "ComplexCTTP: Complexity Class Based Transcoding Time Prediction for Video Sequences Using Artificial Neural Network", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2020/09232616/1o56zD33yHm", "parentPublication": { "id": "proceedings/bigmm/2020/9325/0", "title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiswc/2020/7645/0/764500a072", "title": "CPU Microarchitectural Performance Characterization of Cloud Video Transcoding", "doi": null, "abstractUrl": "/proceedings-article/iiswc/2020/764500a072/1oSXRrhUKLS", "parentPublication": { "id": "proceedings/iiswc/2020/7645/0", "title": "2020 IEEE International Symposium on Workload Characterization (IISWC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1iERNL1KFws", "title": "2020 International Conference on Computing, Networking and Communications (ICNC)", "acronym": "icnc", "groupId": "1800963", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1iERPtsc3cc", "doi": "10.1109/ICNC47757.2020.9049747", "title": "Lightweight Evolving 360 VR Adaptive Video Delivery", "normalizedTitle": "Lightweight Evolving 360 VR Adaptive Video Delivery", "abstract": "Moving towards 4K and 360 Virtual Reality (VR) video streaming is becoming bandwidth prohibitive with users demanding a personalized streaming experience without increasing costs. Existing efforts to improve the streaming experience has centered largely around client-side adaptive bitrate (ABR) algorithms; however, these approaches continue to experience fundamental flaws resulting in subpar content delivery network (CDN) performance. In this paper, we propose a method that addresses long-tail content in CDNs through the implementation of a multipath-aware peer-to-peer mechanism to distribute a video-specific lightweight neural network model, optimize ABR algorithms through reinforcement learning, and actively manage multipath transport networks; all of which significantly increase the 360 VR video streaming experience. Experimental results revealed that our architecture was superior in comparison to many leading ABR algorithms in multipath networks with video bitrates doubling while simultaneously reducing disruptive field-of-view latency switches.", "abstracts": [ { "abstractType": "Regular", "content": "Moving towards 4K and 360 Virtual Reality (VR) video streaming is becoming bandwidth prohibitive with users demanding a personalized streaming experience without increasing costs. 
Existing efforts to improve the streaming experience has centered largely around client-side adaptive bitrate (ABR) algorithms; however, these approaches continue to experience fundamental flaws resulting in subpar content delivery network (CDN) performance. In this paper, we propose a method that addresses long-tail content in CDNs through the implementation of a multipath-aware peer-to-peer mechanism to distribute a video-specific lightweight neural network model, optimize ABR algorithms through reinforcement learning, and actively manage multipath transport networks; all of which significantly increase the 360 VR video streaming experience. Experimental results revealed that our architecture was superior in comparison to many leading ABR algorithms in multipath networks with video bitrates doubling while simultaneously reducing disruptive field-of-view latency switches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Moving towards 4K and 360 Virtual Reality (VR) video streaming is becoming bandwidth prohibitive with users demanding a personalized streaming experience without increasing costs. Existing efforts to improve the streaming experience has centered largely around client-side adaptive bitrate (ABR) algorithms; however, these approaches continue to experience fundamental flaws resulting in subpar content delivery network (CDN) performance. In this paper, we propose a method that addresses long-tail content in CDNs through the implementation of a multipath-aware peer-to-peer mechanism to distribute a video-specific lightweight neural network model, optimize ABR algorithms through reinforcement learning, and actively manage multipath transport networks; all of which significantly increase the 360 VR video streaming experience. 
Experimental results revealed that our architecture was superior in comparison to many leading ABR algorithms in multipath networks with video bitrates doubling while simultaneously reducing disruptive field-of-view latency switches.", "fno": "09049747", "keywords": [ "Learning Artificial Intelligence", "Peer To Peer Computing", "Video Streaming", "Virtual Reality", "Lightweight Evolving 360 VR Adaptive Video Delivery", "Personalized Streaming Experience", "Client Side Adaptive Bitrate Algorithms", "Peer To Peer Mechanism", "Video Specific Lightweight Neural Network Model", "Multipath Transport Networks", "ABR Algorithms", "Video Bitrates", "VR Video Streaming Experience", "Content Delivery Network Performance", "360 Virtual Reality Video Streaming", "Streaming Media", "Neural Networks", "Training", "Bandwidth", "Bit Rate", "Servers", "Media", "DASH", "ABR", "MPTCP", "QUIC", "360", "VR" ], "authors": [ { "affiliation": "Johns Hopkins University Applied Physics Laboratory,Laurel,MD,USA", "fullName": "Brian Hayes", "givenName": "Brian", "surname": "Hayes", "__typename": "ArticleAuthorType" }, { "affiliation": "Kennesaw State University,Atlanta,GA,USA", "fullName": "Yusun Chang", "givenName": "Yusun", "surname": "Chang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icnc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-02-01T00:00:00", "pubType": "proceedings", "pages": "815-819", "year": "2020", "issn": "2325-2626", "isbn": "978-1-7281-4905-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09049684", "articleId": "1iEROqEg10Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "09049479", "articleId": "1iERXPUfwU8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icnc/2018/3652/0/08390354", "title": "Scaling 360-degree Adaptive Bitrate Video 
Delivery Over an SDN Architecture", "doi": null, "abstractUrl": "/proceedings-article/icnc/2018/08390354/12OmNCctfa7", "parentPublication": { "id": "proceedings/icnc/2018/3652/0", "title": "2018 International Conference on Computing, Networking and Communications (ICNC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09894697", "title": "Towards Impact of Chunk-Level Characteristics on Mobile Live Streaming Performance", "doi": null, "abstractUrl": "/journal/tm/5555/01/09894697/1GNpdoCcRCU", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/10089145", "title": "Adaptive Video Streaming in Multi-Tier Computing Networks: Joint Edge Transcoding and Client Enhancement", "doi": null, "abstractUrl": "/journal/tm/5555/01/10089145/1LW4pbZ8Q36", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2020/11/08765424", "title": "A Control Theoretic Approach to ABR Video Streaming: A Fresh Look at PID-Based Rate Adaptation", "doi": null, "abstractUrl": "/journal/tm/2020/11/08765424/1bJTvXL10pG", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a290", "title": "360SRL: A Sequential Reinforcement Learning Approach for ABR Tile-Based 360 Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a290/1cdOIGM7HjO", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icme/2019/9552/0/955200b846", "title": "RESA: A Real-Time Evaluation System for ABR", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b846/1cdORCFcrN6", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a113", "title": "Llama - Low Latency Adaptive Media Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a113/1qBbHZpUxFu", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a077", "title": "CooPEC: Cooperative Prefetching and Edge Caching for Adaptive 360&#x00B0; Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a077/1qBbI2Tm5os", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", "title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2021/1681/0/168100a208", "title": "CUBIST: High-Quality 360-Degree Video Streaming Services via Tile-based Edge Caching and FoV-Adaptive Prefetching", "doi": null, "abstractUrl": "/proceedings-article/icws/2021/168100a208/1yrHDztOVck", "parentPublication": { "id": "proceedings/icws/2021/1681/0", "title": "2021 
IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1oZBzHKi4UM", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "acronym": "svr", "groupId": "1800426", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1oZBC3Rau3K", "doi": "10.1109/SVR51698.2020.00067", "title": "Photorealism in Low-cost Virtual Reality Devices", "normalizedTitle": "Photorealism in Low-cost Virtual Reality Devices", "abstract": "This paper presents a novel framework that allows low-cost devices to visualize and interact with photorealistic scenes. To accomplish this task, we use the Unity streaming package that allows for streaming an application within its editor and the Unity's high definition render pipeline that has a proprietary ray-tracing algorithm. The framework contains a realistic scene using a ray tracing algorithm, a virtual reality camera with barrel shaders to correct the lens distortion necessary to use low-cost cardboards, and a method to collect the mobile device's spatial orientation through a browser to control the user's vision delivered via WebRTC. The proposed framework can produce low-latency, realistic, immersive environments to be accessed through low-cost HMDs and inexpensive mobile devices.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel framework that allows low-cost devices to visualize and interact with photorealistic scenes. To accomplish this task, we use the Unity streaming package that allows for streaming an application within its editor and the Unity's high definition render pipeline that has a proprietary ray-tracing algorithm. The framework contains a realistic scene using a ray tracing algorithm, a virtual reality camera with barrel shaders to correct the lens distortion necessary to use low-cost cardboards, and a method to collect the mobile device's spatial orientation through a browser to control the user's vision delivered via WebRTC. 
The proposed framework can produce low-latency, realistic, immersive environments to be accessed through low-cost HMDs and inexpensive mobile devices.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel framework that allows low-cost devices to visualize and interact with photorealistic scenes. To accomplish this task, we use the Unity streaming package that allows for streaming an application within its editor and the Unity's high definition render pipeline that has a proprietary ray-tracing algorithm. The framework contains a realistic scene using a ray tracing algorithm, a virtual reality camera with barrel shaders to correct the lens distortion necessary to use low-cost cardboards, and a method to collect the mobile device's spatial orientation through a browser to control the user's vision delivered via WebRTC. The proposed framework can produce low-latency, realistic, immersive environments to be accessed through low-cost HMDs and inexpensive mobile devices.", "fno": "923100a406", "keywords": [ "Cameras", "Helmet Mounted Displays", "Internet", "Ray Tracing", "Rendering Computer Graphics", "Virtual Reality", "Photorealism", "Low Cost Virtual Reality Devices", "Photorealistic Scenes", "Unity Streaming Package", "Proprietary Ray Tracing Algorithm", "Realistic Scene", "Ray Tracing Algorithm", "Virtual Reality Camera", "Low Cost Cardboards", "Mobile Device", "Low Cost HMD", "Inexpensive Mobile Devices", "Unity High Definition Render Pipeline", "Web RTC", "Ray Tracing", "Virtual Reality", "Streaming Media", "Smart Phones", "Servers", "Rendering Computer Graphics", "Protocols", "Photorealism", "Virtual Reality", "Ray Tracing" ], "authors": [ { "affiliation": "Federal University of Juiz de Fora,Postgraduate Program in Computer Science,Juiz de Fora,Minas Gerais", "fullName": "Wellingston Cataldo R. 
Junior", "givenName": "Wellingston Cataldo R.", "surname": "Junior", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal University of Juiz de Fora,Postgraduate Program in Computer Science,Juiz de Fora,Minas Gerais", "fullName": "Lidiane T. Pereira", "givenName": "Lidiane T.", "surname": "Pereira", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal University of Juiz de Fora,Postgraduate Program in Computer Science,Juiz de Fora,Minas Gerais", "fullName": "Marcelo F. Moreno", "givenName": "Marcelo F.", "surname": "Moreno", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal University of Juiz de Fora,Postgraduate Program in Computer Science,Juiz de Fora,Minas Gerais", "fullName": "Rodrigo L. S. Silva", "givenName": "Rodrigo L. S.", "surname": "Silva", "__typename": "ArticleAuthorType" } ], "idPrefix": "svr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "406-412", "year": "2020", "issn": null, "isbn": "978-1-7281-9231-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "923100a398", "articleId": "1oZBBw6BBa8", "__typename": "AdjacentArticleType" }, "next": { "fno": "923100a413", "articleId": "1oZBBSo7je8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rt/2007/1629/0/04342605", "title": "Bullet Ray Vision", "doi": null, "abstractUrl": "/proceedings-article/rt/2007/04342605/12OmNAlvHIc", "parentPublication": { "id": "proceedings/rt/2007/1629/0", "title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2006/0693/0/04061560", "title": "Applying Ray Tracing for Virtual Reality and Industrial Design", "doi": null, "abstractUrl": 
"/proceedings-article/rt/2006/04061560/12OmNBTs7tk", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ams/2011/4414/0/4414a193", "title": "Reservoir: An Alternative Load Balancing Technique for Parallel Ray Tracing", "doi": null, "abstractUrl": "/proceedings-article/ams/2011/4414a193/12OmNBtl1xN", "parentPublication": { "id": "proceedings/ams/2011/4414/0", "title": "Asia International Conference on Modelling &amp; Simulation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cadgraphics/2011/4497/0/4497a087", "title": "SIMD Friendly Ray Tracing on GPU", "doi": null, "abstractUrl": "/proceedings-article/cadgraphics/2011/4497a087/12OmNxFaLiE", "parentPublication": { "id": "proceedings/cadgraphics/2011/4497/0", "title": "Computer-Aided Design and Computer Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1995/7187/0/71870027", "title": "A Hardware Acceleration Method for Volumetric Ray Tracing", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870027/12OmNxHJ9p1", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2012/4725/0/4725a131", "title": "Real Time Ray Tracing for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a131/12OmNzcPAGy", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/02/ttg2010020261", "title": "Real-Time Ray Tracing of Implicit Surfaces on the GPU", 
"doi": null, "abstractUrl": "/journal/tg/2010/02/ttg2010020261/13rRUwI5TQT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2007/06/mcg2007060036", "title": "Exploring a Boeing 777: Ray Tracing Large-Scale CAD Data", "doi": null, "abstractUrl": "/magazine/cg/2007/06/mcg2007060036/13rRUxC0SGw", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a483", "title": "Interactive Mixed Reality Rendering on Holographic Pyramid", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a483/1CJcsRpGDQI", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2020/9231/0/923100a391", "title": "ARRay-Tracing - A Middleware to Provide Ray Tracing Capabilities to Augmented Reality Libraries", "doi": null, "abstractUrl": "/proceedings-article/svr/2020/923100a391/1oZBBWaRzNu", "parentPublication": { "id": "proceedings/svr/2020/9231/0", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qBbG37ozSg", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qBbI2Tm5os", "doi": "10.1109/ISM.2020.00019", "title": "CooPEC: Cooperative Prefetching and Edge Caching for Adaptive 360&#x00B0; Video Streaming", "normalizedTitle": "CooPEC: Cooperative Prefetching and Edge Caching for Adaptive 360° Video Streaming", "abstract": "Dynamic Adaptive Streaming over HTTP (DASH) has emerged as the de facto solution for streaming 360&#x00B0;videos. Viewers of 360&#x00B0; videos view only a fraction of each video segment, i.e., the part that corresponds to their Field of View (FoV). To facilitate FoV-adaptive streaming, a segment can be divided into multiple tiles with the FoV corresponding to a subset of tiles. Streaming each segment in its entirety from the video server to a client can incur high communication overheads both in terms of bandwidth and latency. Caching at the network edge can reduce these overheads. However, as edge cache capacity is limited, only a subset of tiles encoded at a subset of supported resolutions may be present in the cache. A viewer, depending on its FoV,may experience cache hit and low download latency for some segments, and a cache miss resulting in high download latency from video server for other segments. This can result in the DASH client unnecessarily triggering quality switches for the following reason: low (high) latency download from edge cache (server, respectively) may be misinterpreted as high (low, respectively) network throughput estimate. 
In this paper, we propose CooPEC (COOperative Prefetching and Edge Caching), a prefetching and complementary caching solution which uses viewers' FoV entropy to: (i) enable a bitrate oscillation-free video streaming, (ii) reduce core network bandwidth consumption, and (iii) enhance QoE for users.", "abstracts": [ { "abstractType": "Regular", "content": "Dynamic Adaptive Streaming over HTTP (DASH) has emerged as the de facto solution for streaming 360&#x00B0;videos. Viewers of 360&#x00B0; videos view only a fraction of each video segment, i.e., the part that corresponds to their Field of View (FoV). To facilitate FoV-adaptive streaming, a segment can be divided into multiple tiles with the FoV corresponding to a subset of tiles. Streaming each segment in its entirety from the video server to a client can incur high communication overheads both in terms of bandwidth and latency. Caching at the network edge can reduce these overheads. However, as edge cache capacity is limited, only a subset of tiles encoded at a subset of supported resolutions may be present in the cache. A viewer, depending on its FoV,may experience cache hit and low download latency for some segments, and a cache miss resulting in high download latency from video server for other segments. This can result in the DASH client unnecessarily triggering quality switches for the following reason: low (high) latency download from edge cache (server, respectively) may be misinterpreted as high (low, respectively) network throughput estimate. In this paper, we propose CooPEC (COOperative Prefetching and Edge Caching), a prefetching and complementary caching solution which uses viewers' FoV entropy to: (i) enable a bitrate oscillation-free video streaming, (ii) reduce core network bandwidth consumption, and (iii) enhance QoE for users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Dynamic Adaptive Streaming over HTTP (DASH) has emerged as the de facto solution for streaming 360°videos. 
Viewers of 360° videos view only a fraction of each video segment, i.e., the part that corresponds to their Field of View (FoV). To facilitate FoV-adaptive streaming, a segment can be divided into multiple tiles with the FoV corresponding to a subset of tiles. Streaming each segment in its entirety from the video server to a client can incur high communication overheads both in terms of bandwidth and latency. Caching at the network edge can reduce these overheads. However, as edge cache capacity is limited, only a subset of tiles encoded at a subset of supported resolutions may be present in the cache. A viewer, depending on its FoV, may experience cache hit and low download latency for some segments, and a cache miss resulting in high download latency from video server for other segments. This can result in the DASH client unnecessarily triggering quality switches for the following reason: low (high) latency download from edge cache (server, respectively) may be misinterpreted as high (low, respectively) network throughput estimate. 
In this paper, we propose CooPEC (COOperative Prefetching and Edge Caching), a prefetching and complementary caching solution which uses viewers' FoV entropy to: (i) enable a bitrate oscillation-free video streaming, (ii) reduce core network bandwidth consumption, and (iii) enhance QoE for users.", "fno": "869700a077", "keywords": [ "Cache Storage", "Cooperative Communication", "Quality Of Experience", "Storage Management", "Video Coding", "Video Streaming", "Video Server", "DASH Client", "Coo PEC", "Complementary Caching Solution", "Core Network Bandwidth Consumption", "Adaptive 360 X 00 B 0 Video Streaming", "Dynamic Adaptive Streaming Over HTTP", "Video Segment", "Fo V Adaptive Streaming", "High Communication Overheads", "Network Edge", "Edge Cache Capacity", "Cache Miss", "Cache Hit", "Bitrate Oscillation Free Video Streaming", "Qo E", "Streaming Media", "Servers", "Bandwidth", "Throughput", "Entropy", "Prefetching", "Bit Rate", "Bitrate Oscillation", "360 X 00 B 0 Video", "Caching" ], "authors": [ { "affiliation": "University of Texas at Dallas,Department of Computer Science,Richardson,Texas,75080", "fullName": "Anahita Mahzari", "givenName": "Anahita", "surname": "Mahzari", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Texas at Dallas,Department of Computer Science,Richardson,Texas,75080", "fullName": "Aliehsan Samiei", "givenName": "Aliehsan", "surname": "Samiei", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Texas at Dallas,Department of Computer Science,Richardson,Texas,75080", "fullName": "Ravi Prakash", "givenName": "Ravi", "surname": "Prakash", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "77-81", "year": "2020", "issn": null, "isbn": "978-1-7281-8697-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "869700a073", "articleId": "1qBbGxRt0ju", "__typename": "AdjacentArticleType" }, "next": { "fno": "869700a082", "articleId": "1qBbIEON8UU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2016/4571/0/4571a407", "title": "Adaptive 360 VR Video Streaming Based on MPEG-DASH SRD", "doi": null, "abstractUrl": "/proceedings-article/ism/2016/4571a407/12OmNx7XH8C", "parentPublication": { "id": "proceedings/ism/2016/4571/0", "title": "2016 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09842378", "title": "Muster: Multi-source Streaming for Tile-based 360&#x00B0; Videos within Cloud Native 5G Networks", "doi": null, "abstractUrl": "/journal/tm/5555/01/09842378/1FlM107xCMw", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2022/9792/0/09913007", "title": "Deep Reinforcement Learning Based Adaptive 360-degree Video Streaming with Field of View Joint Prediction", "doi": null, "abstractUrl": "/proceedings-article/iscc/2022/09913007/1HBK3Mimize", "parentPublication": { "id": "proceedings/iscc/2022/9792/0", "title": "2022 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifip-networking/2019/16/0/08999460", "title": "Advancing user quality of experience in 360-degree video streaming", "doi": null, "abstractUrl": "/proceedings-article/ifip-networking/2019/08999460/1hHLyJf1thC", "parentPublication": { "id": "proceedings/ifip-networking/2019/16/0", "title": "2019 IFIP Networking Conference (IFIP Networking)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icoin/2020/4199/0/09016449", "title": "Buffer Based Adaptation Using Scalable Video Coding for 360-Degree Video Streaming over NDN", "doi": null, "abstractUrl": "/proceedings-article/icoin/2020/09016449/1hQqTVygVbO", "parentPublication": { "id": "proceedings/icoin/2020/4199/0", "title": "2020 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/07/09261971", "title": "Online Bitrate Selection for Viewport Adaptive 360-Degree Video Streaming", "doi": null, "abstractUrl": "/journal/tm/2022/07/09261971/1oPzPzmWa9W", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2021/02/09345461", "title": "Low Quality for High Quality: Exploiting 2K Frames for Supporting Efficient 4K-Quality Pervasive Video Streaming Applications", "doi": null, "abstractUrl": "/magazine/pc/2021/02/09345461/1qTYD4zb3oc", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2020/9916/0/991600a291", "title": "MEC-Assisted FoV-Aware and QoE-Driven Adaptive 360&#x00B0; Video Streaming for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/msn/2020/991600a291/1sBO3kw7jnq", "parentPublication": { "id": "proceedings/msn/2020/9916/0", "title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2020/9916/0/991600a049", "title": "A QoE-based 360<sup>&#x00B0;</sup> Video Adaptive Bitrate Delivery and Caching Scheme for C-RAN", "doi": null, "abstractUrl": "/proceedings-article/msn/2020/991600a049/1sBO4QX9foc", "parentPublication": { "id": "proceedings/msn/2020/9916/0", "title": 
"2020 16th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2021/1681/0/168100a208", "title": "CUBIST: High-Quality 360-Degree Video Streaming Services via Tile-based Edge Caching and FoV-Adaptive Prefetching", "doi": null, "abstractUrl": "/proceedings-article/icws/2021/168100a208/1yrHDztOVck", "parentPublication": { "id": "proceedings/icws/2021/1681/0", "title": "2021 IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qBbG37ozSg", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qBbILxLSZG", "doi": "10.1109/ISM.2020.00009", "title": "Dynamic Segment Repackaging at the Edge for HTTP Adaptive Streaming", "normalizedTitle": "Dynamic Segment Repackaging at the Edge for HTTP Adaptive Streaming", "abstract": "Adaptive video streaming systems typically support different media delivery formats, e.g., MPEG-DASH and HLS, replicating the same content multiple times into the network. Such a diversified system results in inefficient use of storage, caching, and bandwidth resources. The Common Media Application Format (CMAF) emerges to simplify HTTP Adaptive Streaming (HAS), providing a single encoding and packaging format of segmented media content and offering the opportunities of bandwidth savings, more cache hits, and less storage needed. However, CMAF is not yet supported by most devices. To solve this issue, we present a solution where we maintain the main advantages of CMAF while supporting heterogeneous devices using different media delivery formats. For that purpose, we propose to dynamically convert the content from CMAF to the desired media delivery format at an edge node. We study the bandwidth savings with our proposed approach using an analytical model and simulation, resulting in bandwidth savings of up to 20% with different media delivery format distributions. We analyze the runtime impact of the required operations on the segmented content performed in two scenarios: (i) the classic one, with four different media delivery formats, and (ii) the proposed scenario, using CMAF-only delivery through the network. We compare both scenarios with different edge compute power assumptions. 
Finally, we perform experiments in a real video streaming testbed delivering MPEG-DASH using CMAF content to serve a DASH and an HLS client, performing the media conversion for the latter one.", "abstracts": [ { "abstractType": "Regular", "content": "Adaptive video streaming systems typically support different media delivery formats, e.g., MPEG-DASH and HLS, replicating the same content multiple times into the network. Such a diversified system results in inefficient use of storage, caching, and bandwidth resources. The Common Media Application Format (CMAF) emerges to simplify HTTP Adaptive Streaming (HAS), providing a single encoding and packaging format of segmented media content and offering the opportunities of bandwidth savings, more cache hits, and less storage needed. However, CMAF is not yet supported by most devices. To solve this issue, we present a solution where we maintain the main advantages of CMAF while supporting heterogeneous devices using different media delivery formats. For that purpose, we propose to dynamically convert the content from CMAF to the desired media delivery format at an edge node. We study the bandwidth savings with our proposed approach using an analytical model and simulation, resulting in bandwidth savings of up to 20% with different media delivery format distributions. We analyze the runtime impact of the required operations on the segmented content performed in two scenarios: (i) the classic one, with four different media delivery formats, and (ii) the proposed scenario, using CMAF-only delivery through the network. We compare both scenarios with different edge compute power assumptions. 
Finally, we perform experiments in a real video streaming testbed delivering MPEG-DASH using CMAF content to serve a DASH and an HLS client, performing the media conversion for the latter one.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Adaptive video streaming systems typically support different media delivery formats, e.g., MPEG-DASH and HLS, replicating the same content multiple times into the network. Such a diversified system results in inefficient use of storage, caching, and bandwidth resources. The Common Media Application Format (CMAF) emerges to simplify HTTP Adaptive Streaming (HAS), providing a single encoding and packaging format of segmented media content and offering the opportunities of bandwidth savings, more cache hits, and less storage needed. However, CMAF is not yet supported by most devices. To solve this issue, we present a solution where we maintain the main advantages of CMAF while supporting heterogeneous devices using different media delivery formats. For that purpose, we propose to dynamically convert the content from CMAF to the desired media delivery format at an edge node. We study the bandwidth savings with our proposed approach using an analytical model and simulation, resulting in bandwidth savings of up to 20% with different media delivery format distributions. We analyze the runtime impact of the required operations on the segmented content performed in two scenarios: (i) the classic one, with four different media delivery formats, and (ii) the proposed scenario, using CMAF-only delivery through the network. We compare both scenarios with different edge compute power assumptions. 
Finally, we perform experiments in a real video streaming testbed delivering MPEG-DASH using CMAF content to serve a DASH and an HLS client, performing the media conversion for the latter one.", "fno": "869700a017", "keywords": [ "Transport Protocols", "Video Coding", "Video Streaming", "Dynamic Segment Repackaging", "HTTP Adaptive Streaming", "Adaptive Video Streaming Systems", "MPEG DASH", "Bandwidth Resources", "Common Media Application Format", "Single Encoding", "Packaging Format", "Segmented Media Content", "Bandwidth Savings", "Desired Media Delivery Format", "Segmented Content", "CMAF Only Delivery", "CMAF Content", "Media Conversion", "Edge Compute Power Assumptions", "Media", "Streaming Media", "Servers", "Bandwidth", "Analytical Models", "Transform Coding", "Encryption", "CMAF", "Transmuxing", "HAS" ], "authors": [ { "affiliation": "Institute of Information Technology, Alpen-Adria-Universität Klagenfurt,Austria", "fullName": "Jesús Aguilar-Armijo", "givenName": "Jesús", "surname": "Aguilar-Armijo", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Information Technology, Alpen-Adria-Universität Klagenfurt,Austria", "fullName": "Babak Taraghi", "givenName": "Babak", "surname": "Taraghi", "__typename": "ArticleAuthorType" }, { "affiliation": "Bitmovin,Klagenfurt,Austria", "fullName": "Christian Timmerer", "givenName": "Christian", "surname": "Timmerer", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Information Technology, Alpen-Adria-Universität Klagenfurt,Austria", "fullName": "Hermann Hellwagner", "givenName": "Hermann", "surname": "Hellwagner", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "17-24", "year": "2020", "issn": null, "isbn": "978-1-7281-8697-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "869700a009", "articleId": "1qBbGSLP3m8", "__typename": "AdjacentArticleType" }, "next": { "fno": "869700a025", "articleId": "1qBbG5MSIHS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmew/2015/7079/0/07169746", "title": "Live transcoding and streaming-as-a-service with MPEG-DASH", "doi": null, "abstractUrl": "/proceedings-article/icmew/2015/07169746/12OmNANkock", "parentPublication": { "id": "proceedings/icmew/2015/7079/0", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607498", "title": "Dynamic Adaptive Streaming over HTTP/2.0", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607498/12OmNC3XhoI", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2015/0478/0/07507144", "title": "DASH-DMS: To improve streaming video over HTTP", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2015/07507144/12OmNCdBDLS", "parentPublication": { "id": "proceedings/aiccsa/2015/0478/0", "title": "2015 IEEE/ACS 12th International Conference of Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019482", "title": "Network-assisted strategy for dash over CCN", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019482/12OmNs0C9KU", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icmew/2016/1552/0/07574709", "title": "ARBITER: Adaptive rate-based intelligent HTTP streaming algorithm", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574709/12OmNvjQ91D", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607500", "title": "An experimental analysis of Dynamic Adaptive Streaming over HTTP in Content Centric Networks", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607500/12OmNwKoZe4", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnp/2013/1270/0/06733626", "title": "Modeling, identifying, and simulating Dynamic Adaptive Streaming over HTTP", "doi": null, "abstractUrl": "/proceedings-article/icnp/2013/06733626/12OmNzEVRVX", "parentPublication": { "id": "proceedings/icnp/2013/1270/0", "title": "2013 21st IEEE International Conference on Network Protocols (ICNP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2005/03/u3059", "title": "Segment-Based Proxy Caching for Internet Streaming Media Delivery", "doi": null, "abstractUrl": "/magazine/mu/2005/03/u3059/13rRUygBw4g", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2019/8350/0/08718153", "title": "Dynamic Adaptive Streaming Over HTTP Using Scalable Video Coding with Multipath TCP in SDN", "doi": null, "abstractUrl": "/proceedings-article/icoin/2019/08718153/1aIRYgvOBva", "parentPublication": { "id": "proceedings/icoin/2019/8350/0", "title": "2019 International Conference on 
Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102775", "title": "Mipso: Multi-Period Per-Scene Optimization For Http Adaptive Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102775/1kwrkKJcgLK", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwl8GHU", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNx8wTmi", "doi": "10.1109/3DUI.2013.6550236", "title": "Poster: Using the whole body for multi-channel gestural interface", "normalizedTitle": "Poster: Using the whole body for multi-channel gestural interface", "abstract": "With the proliferation of touch-screen devices and sensors that are able to track full-body movement in real-time, consumers are experiencing a shift in how they may interface with digital content. Such interfaces sense and interpret human motion, and so interaction metaphors must be built with the body in mind. The hands are the most dexterous and among the most expressive parts of our bodies, and hence are central to the majority of modern gestural input paradigms. Making fuller use of the whole body by considering more than the hands enables a greater array of possible gestural input channels and has not been thoroughly explored. This paper discusses design considerations for somatic interactions that employ multiple degrees of bodily freedom to grant synchronous and parallel input channels. Due to the physical and spatial nature of our bodies, such interfaces are well-suited to 3D graphical applications and we present a user study comparing a traditional method of virtual object manipulation with the gestural equivalents that make use of the whole body as a multi-channel interface.", "abstracts": [ { "abstractType": "Regular", "content": "With the proliferation of touch-screen devices and sensors that are able to track full-body movement in real-time, consumers are experiencing a shift in how they may interface with digital content. Such interfaces sense and interpret human motion, and so interaction metaphors must be built with the body in mind. 
The hands are the most dexterous and among the most expressive parts of our bodies, and hence are central to the majority of modern gestural input paradigms. Making fuller use of the whole body by considering more than the hands enables a greater array of possible gestural input channels and has not been thoroughly explored. This paper discusses design considerations for somatic interactions that employ multiple degrees of bodily freedom to grant synchronous and parallel input channels. Due to the physical and spatial nature of our bodies, such interfaces are well-suited to 3D graphical applications and we present a user study comparing a traditional method of virtual object manipulation with the gestural equivalents that make use of the whole body as a multi-channel interface.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the proliferation of touch-screen devices and sensors that are able to track full-body movement in real-time, consumers are experiencing a shift in how they may interface with digital content. Such interfaces sense and interpret human motion, and so interaction metaphors must be built with the body in mind. The hands are the most dexterous and among the most expressive parts of our bodies, and hence are central to the majority of modern gestural input paradigms. Making fuller use of the whole body by considering more than the hands enables a greater array of possible gestural input channels and has not been thoroughly explored. This paper discusses design considerations for somatic interactions that employ multiple degrees of bodily freedom to grant synchronous and parallel input channels. 
Due to the physical and spatial nature of our bodies, such interfaces are well-suited to 3D graphical applications and we present a user study comparing a traditional method of virtual object manipulation with the gestural equivalents that make use of the whole body as a multi-channel interface.", "fno": "06550236", "keywords": [ "Three Dimensional Displays", "Switches", "Standards", "Human Computer Interaction", "Sensors", "Usability", "Analysis Of Variance" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Univ. Coll. London, London, UK", "fullName": "William Steptoe", "givenName": "William", "surname": "Steptoe", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Univ. Coll. London, London, UK", "fullName": "Lu Zhao", "givenName": null, "surname": "Lu Zhao", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "177-178", "year": "2013", "issn": null, "isbn": "978-1-4673-6097-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06550235", "articleId": "12OmNwvVrEN", "__typename": "AdjacentArticleType" }, "next": { "fno": "06550237", "articleId": "12OmNqC2uWf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2008/2047/0/04476592", "title": "Tech-note: rapMenu: Remote Menu Selection Using Freehand Gestural Input", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476592/12OmNAS9zL1", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759479", "title": "Bimanual gestural interface for virtual environments", "doi": null, 
"abstractUrl": "/proceedings-article/vr/2011/05759479/12OmNzTH0P2", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2006/2503/0/25030231", "title": "Robust Spotting of Key Gestures from Whole Body Motion Sequence", "doi": null, "abstractUrl": "/proceedings-article/fg/2006/25030231/12OmNzkuKDG", "parentPublication": { "id": "proceedings/fg/2006/2503/0", "title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/01/mcg2017010070", "title": "Improving 3D Character Posing with a Gestural Interface", "doi": null, "abstractUrl": "/magazine/cg/2017/01/mcg2017010070/13rRUwInv9a", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2012/04/mco2012040042", "title": "Gestural Interaction in Vehicular Applications", "doi": null, "abstractUrl": "/magazine/co/2012/04/mco2012040042/13rRUwInvNU", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/02/mcg2013020047", "title": "3D Freehand Gestural Navigation for Interactive Public Displays", "doi": null, "abstractUrl": "/magazine/cg/2013/02/mcg2013020047/13rRUy0HYMj", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a834", "title": "Bouncing Seat: An Immersive Virtual Locomotion Interface with LSTM Based Body Gesture Estimation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a834/1CJeJ5XEJSE", "parentPublication": { "id": 
"proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900c307", "title": "Accurate 3D Hand Pose Estimation for Whole-Body 3D Human Mesh Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900c307/1G56n5brhQI", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300g981", "title": "Single-Network Whole-Body Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300g981/1hQqncy7anu", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b749", "title": "FrankMocap: A Monocular 3D Whole-Body Pose Estimation System via Regression and Integration", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b749/1yNiwgGnWhi", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1gQYgEU", "doi": "10.1109/VR.2018.8446259", "title": "Hands-Free Interaction for Augmented Reality in Vascular Interventions", "normalizedTitle": "Hands-Free Interaction for Augmented Reality in Vascular Interventions", "abstract": "Vascular interventions are minimally invasive surgical procedures in which a physician navigates a catheter through a patient's vasculature to a desired destination in the patient's body. Since perception of relevant patient anatomy is limited in procedures of this sort, virtual reality and augmented reality systems have been developed to assist in 3D navigation. These systems often require user interaction, yet both of the physician's hands may already be busy performing the procedure. To address this need, we demonstrate hands-free interaction techniques that use voice and head tracking to allow the physician to interact with 3D virtual content on a head-worn display while making both hands available intraoperatively. Our approach supports rotation and scaling of 3D anatomical models that appear to reside in the surrounding environment through small head rotations using first-order control, and rigid body transformation of those models using zero-order control. This allows the physician to easily manipulate a model while it stays close to the center of their field of view.", "abstracts": [ { "abstractType": "Regular", "content": "Vascular interventions are minimally invasive surgical procedures in which a physician navigates a catheter through a patient's vasculature to a desired destination in the patient's body. 
Since perception of relevant patient anatomy is limited in procedures of this sort, virtual reality and augmented reality systems have been developed to assist in 3D navigation. These systems often require user interaction, yet both of the physician's hands may already be busy performing the procedure. To address this need, we demonstrate hands-free interaction techniques that use voice and head tracking to allow the physician to interact with 3D virtual content on a head-worn display while making both hands available intraoperatively. Our approach supports rotation and scaling of 3D anatomical models that appear to reside in the surrounding environment through small head rotations using first-order control, and rigid body transformation of those models using zero-order control. This allows the physician to easily manipulate a model while it stays close to the center of their field of view.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Vascular interventions are minimally invasive surgical procedures in which a physician navigates a catheter through a patient's vasculature to a desired destination in the patient's body. Since perception of relevant patient anatomy is limited in procedures of this sort, virtual reality and augmented reality systems have been developed to assist in 3D navigation. These systems often require user interaction, yet both of the physician's hands may already be busy performing the procedure. To address this need, we demonstrate hands-free interaction techniques that use voice and head tracking to allow the physician to interact with 3D virtual content on a head-worn display while making both hands available intraoperatively. Our approach supports rotation and scaling of 3D anatomical models that appear to reside in the surrounding environment through small head rotations using first-order control, and rigid body transformation of those models using zero-order control. 
This allows the physician to easily manipulate a model while it stays close to the center of their field of view.", "fno": "08446259", "keywords": [ "Augmented Reality", "Biomedical Equipment", "Catheters", "Computer Graphics", "Medical Computing", "Medical Robotics", "Surgery", "Augmented Reality", "Vascular Interventions", "Minimally Invasive Surgical Procedures", "Physician", "Catheter", "Relevant Patient Anatomy", "Virtual Reality", "Reality Systems", "User Interaction", "Hands Free Interaction Techniques", "Head Tracking", "3 D Virtual Content", "Head Worn Display", "3 D Anatomical Models", "Head Rotations", "Rigid Body Transformation", "Head", "Three Dimensional Displays", "Solid Modeling", "Surgery", "Cameras", "Augmented Reality", "Hands Free Interaction", "Augmented Reality", "Vascular Interventions", "Head Tracking", "Head Worn Display" ], "authors": [ { "affiliation": "Columbia University, USA", "fullName": "Alon Grinshpoon", "givenName": "Alon", "surname": "Grinshpoon", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia University, USA", "fullName": "Shirin Sadri", "givenName": "Shirin", "surname": "Sadri", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia University, USA", "fullName": "Gabrielle J. Loeb", "givenName": "Gabrielle J.", "surname": "Loeb", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia University, USA", "fullName": "Carmine Elvezio", "givenName": "Carmine", "surname": "Elvezio", "__typename": "ArticleAuthorType" }, { "affiliation": "Columbia University, USA", "fullName": "Steven K. 
Feiner", "givenName": "Steven K.", "surname": "Feiner", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "751-752", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08447551", "articleId": "13bd1h03qOr", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446319", "articleId": "13bd1AITn9X", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2007/1749/0/04538837", "title": "Contextual Anatomic Mimesis Hybrid In-Situ Visualization Method for Improving Multi-Sensory Depth Perception in Medical Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2007/04538837/12OmNx5pj13", "parentPublication": { "id": "proceedings/ismar/2007/1749/0", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a172", "title": "[POSTER] Hands-Free AR Work Support System Monitoring Work Progress with Point-cloud Data Processing", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a172/12OmNyO8tSH", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a074", "title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", 
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/07/08723303", "title": "Errata to &#x201C;RingText: Dwell-Free and Hands-Free Text Entry for Mobile Head-Mounted Displays Using Head Motions&#x201D; [May 19 1991-2001]", "doi": null, "abstractUrl": "/journal/tg/2019/07/08723303/1aqzjJfQFCU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798087", "title": "Enhanced Geometric Techniques for Point Marking in Model-Free Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798087/1cJ14ip9JAc", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943734", "title": "Manipulating 3D Anatomic Models in Augmented Reality: Comparing a Hands-Free Approach and a Manual Approach", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943734/1grOL2scnw4", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2020/03/08993789", "title": "Augmented and Virtual Reality in Surgery", "doi": null, "abstractUrl": "/magazine/cs/2020/03/08993789/1hkQPiQFzsQ", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089433", "title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented 
Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a344", "title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a569", "title": "Investigation of Microcirculatory Effects of Experiencing Burning Hands in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a569/1tnXxLHfCOQ", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJbEwHHqEg", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJbHGJZxeM", "doi": "10.1109/VR51125.2022.00090", "title": "Systematic Design Space Exploration of Discrete Virtual Rotations in VR", "normalizedTitle": "Systematic Design Space Exploration of Discrete Virtual Rotations in VR", "abstract": "Continuous virtual rotation is likely one of the biggest contributors to cybersickness, while simultaneously being necessary for many VR scenarios where the user, for instance, is sitting in a bus, at an office desk, or on a couch and therefore, limited in physical body rotation. A possible solution is discrete virtual rotation, such as already broadly accepted in translational movements (teleportation). In this work, we want to help increase the knowledge about discrete virtual rotations. We classify existing work and systematically investigate the two dimensionstarget(rotation)acquisition(selectionvs. directional)and body-based (yes vs. no) regarding their impact on the performance in a naive and a primed rotational search task, spatial orientation, and usability. We do find the novel virtual rotation selection most successful in both search tasks and no difference in the factor bodybased on spatial orientation.", "abstracts": [ { "abstractType": "Regular", "content": "Continuous virtual rotation is likely one of the biggest contributors to cybersickness, while simultaneously being necessary for many VR scenarios where the user, for instance, is sitting in a bus, at an office desk, or on a couch and therefore, limited in physical body rotation. A possible solution is discrete virtual rotation, such as already broadly accepted in translational movements (teleportation). In this work, we want to help increase the knowledge about discrete virtual rotations. 
We classify existing work and systematically investigate the two dimensionstarget(rotation)acquisition(selectionvs. directional)and body-based (yes vs. no) regarding their impact on the performance in a naive and a primed rotational search task, spatial orientation, and usability. We do find the novel virtual rotation selection most successful in both search tasks and no difference in the factor bodybased on spatial orientation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Continuous virtual rotation is likely one of the biggest contributors to cybersickness, while simultaneously being necessary for many VR scenarios where the user, for instance, is sitting in a bus, at an office desk, or on a couch and therefore, limited in physical body rotation. A possible solution is discrete virtual rotation, such as already broadly accepted in translational movements (teleportation). In this work, we want to help increase the knowledge about discrete virtual rotations. We classify existing work and systematically investigate the two dimensionstarget(rotation)acquisition(selectionvs. directional)and body-based (yes vs. no) regarding their impact on the performance in a naive and a primed rotational search task, spatial orientation, and usability. 
We do find the novel virtual rotation selection most successful in both search tasks and no difference in the factor bodybased on spatial orientation.", "fno": "961700a693", "keywords": [ "Ergonomics", "Human Factors", "Virtual Reality", "Cybersickness", "Spatial Orientation", "Usability", "Rotational Search Task", "Virtual Rotation Selection", "Continuous Virtual Rotation", "Discrete Virtual Rotation", "Systematic Design Space Exploration", "Human Computer Interaction", "Three Dimensional Displays", "Systematics", "Cybersickness", "Conferences", "Teleportation", "Space Exploration", "Human Centered Computing", "Mixed Augmented Reality", "Interaction Techniques", "Empirical Studies In HCI" ], "authors": [ { "affiliation": "University of Trier,Germany", "fullName": "Daniel Zielasko", "givenName": "Daniel", "surname": "Zielasko", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Trier,Germany", "fullName": "Jonas Heib", "givenName": "Jonas", "surname": "Heib", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Trier,Germany", "fullName": "Benjamin Weyers", "givenName": "Benjamin", "surname": "Weyers", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "693-702", "year": "2022", "issn": null, "isbn": "978-1-6654-9617-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "961700a683", "articleId": "1CJbQ0Iu1zO", "__typename": "AdjacentArticleType" }, "next": { "fno": "961700a703", "articleId": "1CJbGtoliuY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892227", "title": "Guided head rotation and amplified head rotation: Evaluating semi-natural travel and viewing techniques in virtual reality", "doi": 
null, "abstractUrl": "/proceedings-article/vr/2017/07892227/12OmNwseEYz", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642365", "title": "VR Exploration Assistance through Automatic Occlusion Removal", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642365/17PYEj2mz9Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09736631", "title": "On Rotation Gains Within and Beyond Perceptual Limitations for Seated VR", "doi": null, "abstractUrl": "/journal/tg/5555/01/09736631/1BN1UtLinTi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a238", "title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894041", "title": "Integrating Continuous and Teleporting VR Locomotion Into a Seamless &#x2018;HyperJump&#x2019; Paradigm", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894041/1GIqrCx8RCE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10075482", "title": "An Evaluation of View Rotation Techniques for Seated Navigation in 
Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/5555/01/10075482/1LAuCOR3RE4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090609", "title": "Exploring Effect Of Different External Stimuli On Body Association In VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090609/1jIxuOtbTAQ", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/09/09332290", "title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration", "doi": null, "abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a393", "title": "Effects of a handlebar on standing VR locomotion", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a393/1tnX2vv1TS8", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a504", "title": "Velocity Guided Amplification of View Rotation for Seated VR Scene Exploration", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a504/1tnXyTs22BO", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgCwutI3u", "doi": "10.1109/VR55154.2023.00028", "title": "Tell Me Where To Go: Voice-Controlled Hands-Free Locomotion for Virtual Reality Systems", "normalizedTitle": "Tell Me Where To Go: Voice-Controlled Hands-Free Locomotion for Virtual Reality Systems", "abstract": "As locomotion is an important factor in improving Virtual Reality (VR) immersion and usability, research in this area has been and continues to be a crucial aspect for the success of VR applications. In recent years, a variety of techniques have been developed and evaluated, ranging from abstract control, vehicle, and teleportation techniques to more realistic techniques such as motion, gestures, and gaze. However, when it comes to hands-free scenarios, for example to increase the overall accessibility of an application or in medical scenarios under sterile conditions, most of the announced techniques cannot be applied. This is where the use of speech as an intuitive means of navigation comes in handy. As systems become more capable of understanding and producing speech, voice interfaces become a valuable alternative for input on all types of devices. This takes the quality of hands-free interaction to a new level. However, intuitive user-assisted speech interaction is difficult to realize due to semantic ambiguities in natural language utterances as well as the high real-time requirements of these systems. In this paper, we investigate steering-based locomotion and selection-based locomotion using three speech-based, hands-free methods and compare them with leaning as an established alternative. Our results show that landmark-based locomotion is a convenient, fast, and intuitive way to move between locations in a VR scene. 
Furthermore, we show that in scenarios where landmarks are not available, number grid-based navigation is a successful solution. Based on this, we conclude that speech is a suitable alternative in hands-free scenar-ios, and exciting ideas are emerging for future work focused on developing hands-free ad hoc navigation systems for scenes where landmarks do not exist or are difficult to articulate or recognize.", "abstracts": [ { "abstractType": "Regular", "content": "As locomotion is an important factor in improving Virtual Reality (VR) immersion and usability, research in this area has been and continues to be a crucial aspect for the success of VR applications. In recent years, a variety of techniques have been developed and evaluated, ranging from abstract control, vehicle, and teleportation techniques to more realistic techniques such as motion, gestures, and gaze. However, when it comes to hands-free scenarios, for example to increase the overall accessibility of an application or in medical scenarios under sterile conditions, most of the announced techniques cannot be applied. This is where the use of speech as an intuitive means of navigation comes in handy. As systems become more capable of understanding and producing speech, voice interfaces become a valuable alternative for input on all types of devices. This takes the quality of hands-free interaction to a new level. However, intuitive user-assisted speech interaction is difficult to realize due to semantic ambiguities in natural language utterances as well as the high real-time requirements of these systems. In this paper, we investigate steering-based locomotion and selection-based locomotion using three speech-based, hands-free methods and compare them with leaning as an established alternative. Our results show that landmark-based locomotion is a convenient, fast, and intuitive way to move between locations in a VR scene. 
Furthermore, we show that in scenarios where landmarks are not available, number grid-based navigation is a successful solution. Based on this, we conclude that speech is a suitable alternative in hands-free scenar-ios, and exciting ideas are emerging for future work focused on developing hands-free ad hoc navigation systems for scenes where landmarks do not exist or are difficult to articulate or recognize.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As locomotion is an important factor in improving Virtual Reality (VR) immersion and usability, research in this area has been and continues to be a crucial aspect for the success of VR applications. In recent years, a variety of techniques have been developed and evaluated, ranging from abstract control, vehicle, and teleportation techniques to more realistic techniques such as motion, gestures, and gaze. However, when it comes to hands-free scenarios, for example to increase the overall accessibility of an application or in medical scenarios under sterile conditions, most of the announced techniques cannot be applied. This is where the use of speech as an intuitive means of navigation comes in handy. As systems become more capable of understanding and producing speech, voice interfaces become a valuable alternative for input on all types of devices. This takes the quality of hands-free interaction to a new level. However, intuitive user-assisted speech interaction is difficult to realize due to semantic ambiguities in natural language utterances as well as the high real-time requirements of these systems. In this paper, we investigate steering-based locomotion and selection-based locomotion using three speech-based, hands-free methods and compare them with leaning as an established alternative. Our results show that landmark-based locomotion is a convenient, fast, and intuitive way to move between locations in a VR scene. 
Furthermore, we show that in scenarios where landmarks are not available, number grid-based navigation is a successful solution. Based on this, we conclude that speech is a suitable alternative in hands-free scenar-ios, and exciting ideas are emerging for future work focused on developing hands-free ad hoc navigation systems for scenes where landmarks do not exist or are difficult to articulate or recognize.", "fno": "481500a123", "keywords": [ "Visualization", "Three Dimensional Displays", "Navigation", "Semantics", "Virtual Reality", "Speech Recognition", "Teleportation", "Human Centered Computing Human Computer Interaction HCI", "Computing Methodologies Artificial Intelligence Natural Language Processing Speech Recognition" ], "authors": [ { "affiliation": "University of Jena", "fullName": "Jan Hombeck", "givenName": "Jan", "surname": "Hombeck", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Jena", "fullName": "Henrik Voigt", "givenName": "Henrik", "surname": "Voigt", "__typename": "ArticleAuthorType" }, { "affiliation": "University Hospital Cologne", "fullName": "Timo Heggemann", "givenName": "Timo", "surname": "Heggemann", "__typename": "ArticleAuthorType" }, { "affiliation": "University Hospital Cologne", "fullName": "Rabi R. 
Datta", "givenName": "Rabi R.", "surname": "Datta", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Jena", "fullName": "Kai Lawonn", "givenName": "Kai", "surname": "Lawonn", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "123-134", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "481500a115", "articleId": "1MNgnMu6Sju", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a135", "articleId": "1MNgmceltOU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892348", "title": "Steering locomotion by vestibular perturbation in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892282", "title": "Development and evaluation of a hands-free motion cueing interface for ground-based navigation", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892282/12OmNwoxSdn", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446130", "title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 
3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09737429", "title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness", "doi": null, "abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894041", "title": "Integrating Continuous and Teleporting VR Locomotion Into a Seamless &#x2018;HyperJump&#x2019; Paradigm", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894041/1GIqrCx8RCE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a501", "title": "Exploring Three-Dimensional Locomotion Techniques in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a501/1J7WrBbMYEg", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a530", "title": "The Evaluation of Gait-Free Locomotion Methods with Eye Movement in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a530/1J7WtHqguHu", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090536", "title": "Elastic-Move: Passive Haptic Device with Force Feedback for 
Virtual Reality Locomotion", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090536/1jIxqFQXvSE", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a380", "title": "Evaluating VR Sickness in VR Locomotion Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a480", "title": "Analysis of Positional Tracking Space Usage when using Teleportation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1fHkkWQ0aEE", "title": "2019 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1fHkkZzKire", "doi": "10.1109/CW.2019.00018", "title": "Music in the Air with Leap Motion Controller", "normalizedTitle": "Music in the Air with Leap Motion Controller", "abstract": "Not many people know about the first electronic musical instrument - the theremin - and can play it. The idea of this instrument is very groundbreaking: it is played without physical contact with it and in the same way as we sing but by using hands in place of our vocal cords. In this paper we consider how to implement the theremin with a computer using very different physical principles of optical hand tracking and by adding advantages of visual interfaces. The goal of this research is to eventually fulfill the dream of the inventor to make the theremin a musical instrument for everyone and to prove that everyone can play music.", "abstracts": [ { "abstractType": "Regular", "content": "Not many people know about the first electronic musical instrument - the theremin - and can play it. The idea of this instrument is very groundbreaking: it is played without physical contact with it and in the same way as we sing but by using hands in place of our vocal cords. In this paper we consider how to implement the theremin with a computer using very different physical principles of optical hand tracking and by adding advantages of visual interfaces. The goal of this research is to eventually fulfill the dream of the inventor to make the theremin a musical instrument for everyone and to prove that everyone can play music.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Not many people know about the first electronic musical instrument - the theremin - and can play it. 
The idea of this instrument is very groundbreaking: it is played without physical contact with it and in the same way as we sing but by using hands in place of our vocal cords. In this paper we consider how to implement the theremin with a computer using very different physical principles of optical hand tracking and by adding advantages of visual interfaces. The goal of this research is to eventually fulfill the dream of the inventor to make the theremin a musical instrument for everyone and to prove that everyone can play music.", "fno": "229700a057", "keywords": [ "Electronic Music", "Gesture Recognition", "Motion Control", "Music", "Musical Instruments", "Music", "Leap Motion Controller", "Electronic Musical Instrument", "Vocal Cords", "Optical Hand Tracking", "Visual Interfaces", "Two Dimensional Displays", "Multimodal Interaction And Rendering Art And Heritage In Cyberspace Theremin" ], "authors": [ { "affiliation": "Nanyang Technological University", "fullName": "Alexei Sourin", "givenName": "Alexei", "surname": "Sourin", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "57-60", "year": "2019", "issn": null, "isbn": "978-1-7281-2297-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "229700a053", "articleId": "1fHklgptlIs", "__typename": "AdjacentArticleType" }, "next": { "fno": "229700a061", "articleId": "1fHkoP8izEQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vlsid/2012/4638/0/4638a119", "title": "Real-time Melodic Accompaniment System for Indian Music Using TMS320C6713", "doi": null, "abstractUrl": "/proceedings-article/vlsid/2012/4638a119/12OmNBOCWve", "parentPublication": { "id": "proceedings/vlsid/2012/4638/0", "title": "VLSI 
Design, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ici/2011/4618/0/4618a097", "title": "MIDI Conversion to Musical Notation", "doi": null, "abstractUrl": "/proceedings-article/ici/2011/4618a097/12OmNwDSdjs", "parentPublication": { "id": "proceedings/ici/2011/4618/0", "title": "Informatics and Computational Intelligence, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciic/2010/4152/0/4152a189", "title": "Design of an Architecture for 'An E-Tutor for Music'", "doi": null, "abstractUrl": "/proceedings-article/iciic/2010/4152a189/12OmNwdL7g2", "parentPublication": { "id": "proceedings/iciic/2010/4152/0", "title": "Integrated Intelligent Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sive/2015/1969/0/07361285", "title": "A conceptual framework for motion based music applications", "doi": null, "abstractUrl": "/proceedings-article/sive/2015/07361285/12OmNyUFfL0", "parentPublication": { "id": "proceedings/sive/2015/1969/0", "title": "2015 IEEE 2nd VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2017/3588/0/3588a341", "title": "Evaluate Leap Motion Control for Multiple Hand Posture Recognition", "doi": null, "abstractUrl": "/proceedings-article/svr/2017/3588a341/12OmNyrIaAc", "parentPublication": { "id": "proceedings/svr/2017/3588/0", "title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2015/7204/0/7204a255", "title": "Visual and Interactive Performance of Particles Conducted by the Leap Motion for an Orchestral Arrangement", "doi": null, "abstractUrl": 
"/proceedings-article/svr/2015/7204a255/12OmNyrZLBh", "parentPublication": { "id": "proceedings/svr/2015/7204/0", "title": "2015 XVII Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a906", "title": "Band Overdrive: A Multi-Instrument Virtual Reality Music Rhythm Game", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a906/1CJcIGUC37O", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmct/2022/7362/0/736200a010", "title": "A Virtual Ethnic Musical Instrument Platform Based on Web App", "doi": null, "abstractUrl": "/proceedings-article/icmct/2022/736200a010/1Ml2hqurrva", "parentPublication": { "id": "proceedings/icmct/2022/7362/0", "title": "2022 7th International Conference on Multimedia Communication Technologies (ICMCT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797825", "title": "Coretet: A 21<sup>st</sup> Century Virtual Reality Musical Instrument for Solo and Networked Ensemble Performance", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797825/1cJ1g65n5iU", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090649", "title": "Musical Brush: Exploring Creativity in an AR-based Tool Combining Music and Drawing Generation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090649/1jIxnfS0xfq", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D 
User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1nkDclx75Kg", "title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)", "acronym": "compsac", "groupId": "1000143", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1nkDglfxgB2", "doi": "10.1109/COMPSAC48688.2020.0-215", "title": "Machine Learning Applied to Support Medical Decision in Transthoracic Echocardiogram Exams: A Systematic Review", "normalizedTitle": "Machine Learning Applied to Support Medical Decision in Transthoracic Echocardiogram Exams: A Systematic Review", "abstract": "The echocardiogram (ECHO) is an ultrasound of the heart used to diagnose heart diseases (DHC). The analysis and interpretation of ECHO are dependent on the doctor's experience. However, software that uses artificial intelligence to analyze ECHO images or videos is contributing to support the physician's decision. This paper aims to perform a Systematic Literature Review (SLR) on artificial intelligence (AI) techniques applied in the automation of Transthoracic Echocardiogram (TTE) processes, to support medical decisions. The study identified more than 800 articles on the topic in the leading scientific research platforms. To select the most relevant studies, inclusion and exclusion criteria were applied, where 45 articles were selected to compose the detailed study of the SRL. The results obtained with the extraction of information from the papers, identified 3 groups of primary studies, namely: identification of the cardiac vision plan, analysis of cardiac functions and detection of cardiac diseases. SRL identifies that the set of Machine learning (ML) techniques are being widely applied in the tasks of segmentation, detection and classification of images obtained from ECHO. The techniques based on Convolutional Neural Network (CNN), presented the best Accuracy rates. Research shows a strong interest in automating ECHO processes. 
However, it is still an open research field, with the potential to generate many publications for researchers.", "abstracts": [ { "abstractType": "Regular", "content": "The echocardiogram (ECHO) is an ultrasound of the heart used to diagnose heart diseases (DHC). The analysis and interpretation of ECHO are dependent on the doctor's experience. However, software that uses artificial intelligence to analyze ECHO images or videos is contributing to support the physician's decision. This paper aims to perform a Systematic Literature Review (SLR) on artificial intelligence (AI) techniques applied in the automation of Transthoracic Echocardiogram (TTE) processes, to support medical decisions. The study identified more than 800 articles on the topic in the leading scientific research platforms. To select the most relevant studies, inclusion and exclusion criteria were applied, where 45 articles were selected to compose the detailed study of the SRL. The results obtained with the extraction of information from the papers, identified 3 groups of primary studies, namely: identification of the cardiac vision plan, analysis of cardiac functions and detection of cardiac diseases. SRL identifies that the set of Machine learning (ML) techniques are being widely applied in the tasks of segmentation, detection and classification of images obtained from ECHO. The techniques based on Convolutional Neural Network (CNN), presented the best Accuracy rates. Research shows a strong interest in automating ECHO processes. However, it is still an open research field, with the potential to generate many publications for researchers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The echocardiogram (ECHO) is an ultrasound of the heart used to diagnose heart diseases (DHC). The analysis and interpretation of ECHO are dependent on the doctor's experience. 
However, software that uses artificial intelligence to analyze ECHO images or videos is contributing to support the physician's decision. This paper aims to perform a Systematic Literature Review (SLR) on artificial intelligence (AI) techniques applied in the automation of Transthoracic Echocardiogram (TTE) processes, to support medical decisions. The study identified more than 800 articles on the topic in the leading scientific research platforms. To select the most relevant studies, inclusion and exclusion criteria were applied, where 45 articles were selected to compose the detailed study of the SRL. The results obtained with the extraction of information from the papers, identified 3 groups of primary studies, namely: identification of the cardiac vision plan, analysis of cardiac functions and detection of cardiac diseases. SRL identifies that the set of Machine learning (ML) techniques are being widely applied in the tasks of segmentation, detection and classification of images obtained from ECHO. The techniques based on Convolutional Neural Network (CNN), presented the best Accuracy rates. Research shows a strong interest in automating ECHO processes. 
However, it is still an open research field, with the potential to generate many publications for researchers.", "fno": "730300a400", "keywords": [ "Convolutional Neural Nets", "Decision Support Systems", "Diseases", "Echocardiography", "Image Classification", "Image Segmentation", "Learning Artificial Intelligence", "Medical Image Processing", "Diagnose Heart Diseases", "DHC", "ECHO Images", "Systematic Literature Review", "Artificial Intelligence Techniques", "Exclusion Criteria", "Cardiac Vision Plan", "Cardiac Functions", "Cardiac Diseases", "Machine Learning Techniques", "Transthoracic Echocardiogram Exams", "Physician Decision", "SLR", "TTE", "Scientific Research Platforms", "Inclusion Criteria", "Image Classification", "Image Detection", "Image Segmentation", "Convolutional Neural Network", "CNN", "Support Medical Decision", "Two Dimensional Displays", "Three Dimensional Displays", "Machine Learning", "Task Analysis", "Medical Diagnostic Imaging", "Echocardiogram Ecocardiography Machine Learning Deep Learning Systematic Review" ], "authors": [ { "affiliation": "Federal Institute of Tocantins", "fullName": "Vilson Soares de Siqueira", "givenName": "Vilson", "surname": "Soares de Siqueira", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal Institute of Tocantins", "fullName": "Diego de Castro Rodrigues", "givenName": "Diego", "surname": "de Castro Rodrigues", "__typename": "ArticleAuthorType" }, { "affiliation": "Diagnostic Imaging Center, CDI", "fullName": "Colandy Nunes Dourado", "givenName": "Colandy", "surname": "Nunes Dourado", "__typename": "ArticleAuthorType" }, { "affiliation": "Diagnostic Imaging Center, CDI", "fullName": "Moisés Marcos Borges", "givenName": "Moisés", "surname": "Marcos Borges", "__typename": "ArticleAuthorType" }, { "affiliation": "Diagnostic Imaging Center, CDI", "fullName": "Rogério Gomes Furtado", "givenName": "Rogério", "surname": "Gomes Furtado", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal 
University of Goias", "fullName": "Higor Pereira Delfino", "givenName": "Higor", "surname": "Pereira Delfino", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal University of Goias", "fullName": "Diego Stelle", "givenName": "Diego", "surname": "Stelle", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal University of Goias", "fullName": "Rommel Melgaço Barbosa", "givenName": "Rommel", "surname": "Melgaço Barbosa", "__typename": "ArticleAuthorType" }, { "affiliation": "Federal University of Goias", "fullName": "Ronaldo Martins da Costa", "givenName": "Ronaldo", "surname": "Martins da Costa", "__typename": "ArticleAuthorType" } ], "idPrefix": "compsac", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-07-01T00:00:00", "pubType": "proceedings", "pages": "400-407", "year": "2020", "issn": "0730-3157", "isbn": "978-1-7281-7303-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "730300a394", "articleId": "1nkDje2yqcw", "__typename": "AdjacentArticleType" }, "next": { "fno": "730300a408", "articleId": "1nkDgNVScve", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2009/3992/0/05206838", "title": "Echocardiogram view classification using edge filtered scale-invariant motion features", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206838/12OmNCbU31H", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccima/2007/3050/3/30500415", "title": "A Novel Method for Echocardiogram Boundary Detection Using Adaptive Neuro -Fuzzy Systems", "doi": null, "abstractUrl": "/proceedings-article/iccima/2007/30500415/12OmNqN6R4V", "parentPublication": { 
"id": "proceedings/iccima/2007/3050/3", "title": "2007 International Conference on Computational Intelligence and Multimedia Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2007/1630/0/04408867", "title": "Automatic Cardiac View Classification of Echocardiogram", "doi": null, "abstractUrl": "/proceedings-article/iccv/2007/04408867/12OmNsd6vj7", "parentPublication": { "id": "proceedings/iccv/2007/1630/0", "title": "2007 11th IEEE International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1994/1775/3/00389591", "title": "Echocardiogram structure and tissue classification using hierarchical fuzzy neural networks", "doi": null, "abstractUrl": "/proceedings-article/icassp/1994/00389591/12OmNvHY2GB", "parentPublication": { "id": "proceedings/icassp/1994/1775/3", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a568", "title": "Segmentation of Anatomical Structures in Four-Chamber View Echocardiogram Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a568/12OmNvqW6XY", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543599", "title": "Cardiac disease detection from echocardiogram using edge filtered scale-invariant motion features", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543599/12OmNwE9Oui", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2018/5377/0/537701a382", "title": "Extracting Key Findings Compared In an Echocardiogram Report", "doi": null, "abstractUrl": "/proceedings-article/ichi/2018/537701a382/12OmNwNwzNq", "parentPublication": { "id": "proceedings/ichi/2018/5377/0", "title": "2018 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204054", "title": "Automatic estimation of left ventricular dysfunction from echocardiogram videos", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204054/12OmNxbEtNT", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iisa/2018/8161/0/08633592", "title": "Non-Immersive Virtual Cardiac Auscultation Interactions Employing a 3D Printed Stethoscope", "doi": null, "abstractUrl": "/proceedings-article/iisa/2018/08633592/17D45WHONjt", "parentPublication": { "id": "proceedings/iisa/2018/8161/0", "title": "2018 9th International Conference on Information, Intelligence, Systems and Applications (IISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2020/6090/0/09155835", "title": "Impacts of Observational Learning and Self-regulated Learning Mechanisms on Online Learning Performance: A Case Study on High School Mathematics Course", "doi": null, "abstractUrl": "/proceedings-article/icalt/2020/09155835/1m1j4bdYD1m", "parentPublication": { "id": "proceedings/icalt/2020/6090/0", "title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVr", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNwpGgNQ", "doi": "10.1109/CVPR.2017.383", "title": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair", "normalizedTitle": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair", "abstract": "We present a novel method that allows for simultaneous geometric and radiometric calibration of a projector-camera pair. It is simple and does not require specialized hardware. We prewarp and align a specially designed projection pattern onto a printed pattern of different colorimetric properties. After capturing the patterns in several orientations, we perform geometric calibration by estimating the corner locations of the two patterns in different color channels. We perform radiometric calibration of the projector by using the information contained inside the projected squares. We show that our method performs on par with current approaches that all require separate geometric and radiometric calibration, while being more efficient and user friendly.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel method that allows for simultaneous geometric and radiometric calibration of a projector-camera pair. It is simple and does not require specialized hardware. We prewarp and align a specially designed projection pattern onto a printed pattern of different colorimetric properties. After capturing the patterns in several orientations, we perform geometric calibration by estimating the corner locations of the two patterns in different color channels. We perform radiometric calibration of the projector by using the information contained inside the projected squares. 
We show that our method performs on par with current approaches that all require separate geometric and radiometric calibration, while being more efficient and user friendly.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel method that allows for simultaneous geometric and radiometric calibration of a projector-camera pair. It is simple and does not require specialized hardware. We prewarp and align a specially designed projection pattern onto a printed pattern of different colorimetric properties. After capturing the patterns in several orientations, we perform geometric calibration by estimating the corner locations of the two patterns in different color channels. We perform radiometric calibration of the projector by using the information contained inside the projected squares. We show that our method performs on par with current approaches that all require separate geometric and radiometric calibration, while being more efficient and user friendly.", "fno": "0457d596", "keywords": [ "Calibration", "Cameras", "Colorimetry", "Optical Projectors", "Specially Designed Projection Pattern", "Printed Pattern", "Geometric Calibration", "Radiometric Calibration", "Projector Camera Pair", "Colorimetric Properties", "Calibration", "Cameras", "Radiometry", "Image Color Analysis", "Three Dimensional Displays", "Encoding", "Ink" ], "authors": [ { "affiliation": null, "fullName": "Marjan Shahpaski", "givenName": "Marjan", "surname": "Shahpaski", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Luis Ricardo Sapaico", "givenName": "Luis Ricardo", "surname": "Sapaico", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gaspard Chevassus", "givenName": "Gaspard", "surname": "Chevassus", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sabine Süsstrunk", "givenName": "Sabine", "surname": "Süsstrunk", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": 
false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-07-01T00:00:00", "pubType": "proceedings", "pages": "3596-3604", "year": "2017", "issn": "1063-6919", "isbn": "978-1-5386-0457-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0457d587", "articleId": "12OmNwbLVoY", "__typename": "AdjacentArticleType" }, "next": { "fno": "0457d605", "articleId": "12OmNywfKFh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457a275", "title": "Radiometric Calibration for Internet Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a275/12OmNvRU0rK", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315266", "title": "Radiometric calibration from a single image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315266/12OmNweTvNz", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457b695", "title": "Radiometric Calibration from Faces in Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457b695/12OmNwlqhJo", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2011/707/0/05753117", "title": "Radiometric calibration using photo collections", "doi": null, "abstractUrl": "/proceedings-article/iccp/2011/05753117/12OmNxGALee", "parentPublication": { "id": "proceedings/iccp/2011/707/0", "title": "IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130328", "title": "Photometric stereo with auto-radiometric calibration", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130328/12OmNyPQ4A9", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/1/01315067", "title": "Making one object look like another: controlling appearance using a projector-camera system", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315067/12OmNzcPAjA", "parentPublication": { "id": "proceedings/cvpr/2004/2158/1", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/01/ttp2013010144", "title": "Radiometric Calibration by Rank Minimization", "doi": null, "abstractUrl": "/journal/tp/2013/01/ttp2013010144/13rRUwwJWH0", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c831", "title": "Self-Calibrating Polarising Radiometric Calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c831/17D45Xh13wi", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/07/08651349", "title": "Ambiguity-Free Radiometric Calibration for Internet Photo Collections", "doi": null, "abstractUrl": "/journal/tp/2020/07/08651349/1koL5eVW1lC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382896", "title": "DeProCams: Simultaneous Relighting, Compensation and Shape Reconstruction for Projector-Camera Systems", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382896/1saZvVKgpFK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyr6w5YIIU", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gyraGpeGyY", "doi": "10.1109/CVPR.2019.00697", "title": "End-To-End Projector Photometric Compensation", "normalizedTitle": "End-To-End Projector Photometric Compensation", "abstract": "Projector photometric compensation aims to modify a projector input image such that it can compensate for disturbance from the appearance of projection surface. In this paper, for the first time, we formulate the compensation problem as an end-to-end learning problem and propose a convolutional neural network, named CompenNet, to implicitly learn the complex compensation function. CompenNet consists of a UNet-like backbone network and an autoencoder subnet. Such architecture encourages rich multi-level interactions between the camera-captured projection surface image and the input image, and thus captures both photometric and environment information of the projection surface. In addition, the visual details and interaction information are carried to deeper layers along the multi-level skip convolution layers. The architecture is of particular importance for the projector compensation task, for which only a small training dataset is allowed in practice. Another contribution we make is a novel evaluation benchmark, which is independent of system setup and thus quantitatively verifiable. Such benchmark is not previously available, to our best knowledge, due to the fact that conventional evaluation requests the hardware system to actually project the final results. Our key idea, motivated from our end-to-end problem formulation, is to use a reasonable surrogate to avoid such projection process so as to be setup-independent. 
Our method is evaluated carefully on the benchmark, and the results show that our end-to-end learning solution outperforms state-of-the-arts both qualitatively and quantitatively by a significant margin.", "abstracts": [ { "abstractType": "Regular", "content": "Projector photometric compensation aims to modify a projector input image such that it can compensate for disturbance from the appearance of projection surface. In this paper, for the first time, we formulate the compensation problem as an end-to-end learning problem and propose a convolutional neural network, named CompenNet, to implicitly learn the complex compensation function. CompenNet consists of a UNet-like backbone network and an autoencoder subnet. Such architecture encourages rich multi-level interactions between the camera-captured projection surface image and the input image, and thus captures both photometric and environment information of the projection surface. In addition, the visual details and interaction information are carried to deeper layers along the multi-level skip convolution layers. The architecture is of particular importance for the projector compensation task, for which only a small training dataset is allowed in practice. Another contribution we make is a novel evaluation benchmark, which is independent of system setup and thus quantitatively verifiable. Such benchmark is not previously available, to our best knowledge, due to the fact that conventional evaluation requests the hardware system to actually project the final results. Our key idea, motivated from our end-to-end problem formulation, is to use a reasonable surrogate to avoid such projection process so as to be setup-independent. 
Our method is evaluated carefully on the benchmark, and the results show that our end-to-end learning solution outperforms state-of-the-arts both qualitatively and quantitatively by a significant margin.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Projector photometric compensation aims to modify a projector input image such that it can compensate for disturbance from the appearance of projection surface. In this paper, for the first time, we formulate the compensation problem as an end-to-end learning problem and propose a convolutional neural network, named CompenNet, to implicitly learn the complex compensation function. CompenNet consists of a UNet-like backbone network and an autoencoder subnet. Such architecture encourages rich multi-level interactions between the camera-captured projection surface image and the input image, and thus captures both photometric and environment information of the projection surface. In addition, the visual details and interaction information are carried to deeper layers along the multi-level skip convolution layers. The architecture is of particular importance for the projector compensation task, for which only a small training dataset is allowed in practice. Another contribution we make is a novel evaluation benchmark, which is independent of system setup and thus quantitatively verifiable. Such benchmark is not previously available, to our best knowledge, due to the fact that conventional evaluation requests the hardware system to actually project the final results. Our key idea, motivated from our end-to-end problem formulation, is to use a reasonable surrogate to avoid such projection process so as to be setup-independent. 
Our method is evaluated carefully on the benchmark, and the results show that our end-to-end learning solution outperforms state-of-the-arts both qualitatively and quantitatively by a significant margin.", "fno": "329300g803", "keywords": [ "Cameras", "Compensation", "Computerised Instrumentation", "Convolutional Neural Nets", "Image Coding", "Learning Artificial Intelligence", "Photometry", "End To End Learning Problem", "Convolutional Neural Network", "Complex Compensation Function", "Photometric Environment Information", "Multilevel Skip Convolution Layers", "End To End Projector Photometric Compensation", "Camera Captured Projection Surface Imaging", "Compen Net Function", "U Net Like Backbone Network", "Autoencoder Subnet", "Training", "Visualization", "Computer Vision", "Convolution", "Computer Architecture", "Benchmark Testing", "Hardware", "Vision Graphics", "Computational Photography", "Deep Learning", "Others" ], "authors": [ { "affiliation": "Temple Univ.", "fullName": "Bingyao Huang", "givenName": "Bingyao", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Temple Univ.", "fullName": "Haibin Ling", "givenName": "Haibin", "surname": "Ling", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-06-01T00:00:00", "pubType": "proceedings", "pages": "6803-6812", "year": "2019", "issn": null, "isbn": "978-1-7281-3293-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "329300g793", "articleId": "1gys5JVbdAY", "__typename": "AdjacentArticleType" }, "next": { "fno": "329300g813", "articleId": "1gyrpvniHiU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2013/4990/0/4990a924", "title": "Practical Non-linear Photometric Projector Compensation", "doi": null, 
"abstractUrl": "/proceedings-article/cvprw/2013/4990a924/12OmNBQkwYN", "parentPublication": { "id": "proceedings/cvprw/2013/4990/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2006/2646/0/26460006", "title": "Robust Content-Dependent Photometric Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2006/26460006/12OmNzYNN6k", "parentPublication": { "id": "proceedings/cvprw/2006/2646/0", "title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/01/ttg2008010097", "title": "Real-Time Adaptive Radiometric Compensation", "doi": null, "abstractUrl": "/journal/tg/2008/01/ttg2008010097/13rRUwhpBE2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cost/2022/6248/0/624800a180", "title": "Review of Photometric Compensation in Projection System", "doi": null, "abstractUrl": "/proceedings-article/cost/2022/624800a180/1H2pilDW8ww", "parentPublication": { "id": "proceedings/cost/2022/6248/0", "title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a449", "title": "Extended Depth-of-Field Projector using Learned Diffractive Optics", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a449/1MNgNe272U0", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2023/4815/0/481500a135", "title": "CompenHR: Efficient Full Compensation for High-resolution Projector", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a135/1MNgmceltOU", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h164", "title": "CompenNet++: End-to-End Full Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h164/1hQqyKnlCEM", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2005/2372/2/01467579", "title": "A projector-camera system with real-time photometric adaptation for dynamic environments", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2005/01467579/1htC67moXAs", "parentPublication": { "id": "proceedings/cvpr/2005/2372/2", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09318552", "title": "End-to-End Full Projector Compensation", "doi": null, "abstractUrl": "/journal/tp/2022/06/09318552/1qdT3YKBd5u", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382896", "title": "DeProCams: Simultaneous Relighting, Compensation and Shape Reconstruction for Projector-Camera Systems", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382896/1saZvVKgpFK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hQqyKnlCEM", "doi": "10.1109/ICCV.2019.00726", "title": "CompenNet++: End-to-End Full Projector Compensation", "normalizedTitle": "CompenNet++: End-to-End Full Projector Compensation", "abstract": "Full projector compensation aims to modify a projector input image such that it can compensate for both geometric and photometric disturbance of the projection surface. Traditional methods usually solve the two parts separately, although they are known to correlate with each other. In this paper, we propose the first end-to-end solution, named CompenNet++, to solve the two problems jointly. Our work non-trivially extends CompenNet, which was recently proposed for photometric compensation with promising performance. First, we propose a novel geometric correction subnet, which is designed with a cascaded coarse-to-fine structure to learn the sampling grid directly from photometric sampling images. Second, by concatenating the geometric correction subset with CompenNet, CompenNet++ accomplishes full projector compensation and is end-to-end trainable. Third, after training, we significantly simplify both geometric and photometric compensation parts, and hence largely improves the running time efficiency. Moreover, we construct the first setup-independent full compensation benchmark to facilitate the study on this topic. In our thorough experiments, our method shows clear advantages over previous arts with promising compensation quality and meanwhile being practically convenient.", "abstracts": [ { "abstractType": "Regular", "content": "Full projector compensation aims to modify a projector input image such that it can compensate for both geometric and photometric disturbance of the projection surface. 
Traditional methods usually solve the two parts separately, although they are known to correlate with each other. In this paper, we propose the first end-to-end solution, named CompenNet++, to solve the two problems jointly. Our work non-trivially extends CompenNet, which was recently proposed for photometric compensation with promising performance. First, we propose a novel geometric correction subnet, which is designed with a cascaded coarse-to-fine structure to learn the sampling grid directly from photometric sampling images. Second, by concatenating the geometric correction subset with CompenNet, CompenNet++ accomplishes full projector compensation and is end-to-end trainable. Third, after training, we significantly simplify both geometric and photometric compensation parts, and hence largely improves the running time efficiency. Moreover, we construct the first setup-independent full compensation benchmark to facilitate the study on this topic. In our thorough experiments, our method shows clear advantages over previous arts with promising compensation quality and meanwhile being practically convenient.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Full projector compensation aims to modify a projector input image such that it can compensate for both geometric and photometric disturbance of the projection surface. Traditional methods usually solve the two parts separately, although they are known to correlate with each other. In this paper, we propose the first end-to-end solution, named CompenNet++, to solve the two problems jointly. Our work non-trivially extends CompenNet, which was recently proposed for photometric compensation with promising performance. First, we propose a novel geometric correction subnet, which is designed with a cascaded coarse-to-fine structure to learn the sampling grid directly from photometric sampling images. 
Second, by concatenating the geometric correction subset with CompenNet, CompenNet++ accomplishes full projector compensation and is end-to-end trainable. Third, after training, we significantly simplify both geometric and photometric compensation parts, and hence largely improves the running time efficiency. Moreover, we construct the first setup-independent full compensation benchmark to facilitate the study on this topic. In our thorough experiments, our method shows clear advantages over previous arts with promising compensation quality and meanwhile being practically convenient.", "fno": "480300h164", "keywords": [ "Compensation", "Feature Extraction", "Image Sampling", "Learning Artificial Intelligence", "Optical Projectors", "End To End Full Projector Compensation", "Projector Input Image", "Geometric Disturbance", "Photometric Disturbance", "Projection Surface", "Photometric Compensation", "Geometric Correction Subnet", "Photometric Sampling Images", "Geometric Correction Subset", "Full Projector Compensation", "Compensation Benchmark", "Compensation Quality", "Surface Texture", "Measurement", "Geometry", "Image Color Analysis", "Cameras", "Face", "Training" ], "authors": [ { "affiliation": "Temple University", "fullName": "Bingyao Huang", "givenName": "Bingyao", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Temple University", "fullName": "Haibin Ling", "givenName": "Haibin", "surname": "Ling", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "7164-7173", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "480300h153", "articleId": "1hVlFj4REmk", "__typename": "AdjacentArticleType" }, "next": { "fno": "480300h174", "articleId": 
"1hVlNf9VCQE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2013/4990/0/4990a924", "title": "Practical Non-linear Photometric Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2013/4990a924/12OmNBQkwYN", "parentPublication": { "id": "proceedings/cvprw/2013/4990/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780017", "title": "Achieving Color Uniformity Across Multi-Projector Displays", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780017/12OmNwlHSVv", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2006/2646/0/26460010", "title": "Robust and Accurate Visual Echo Cancelation in a Full-Duplex Projector-Camera System", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2006/26460010/12OmNyGtjis", "parentPublication": { "id": "proceedings/cvprw/2006/2646/0", "title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2006/2646/0/26460006", "title": "Robust Content-Dependent Photometric Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2006/26460006/12OmNzYNN6k", "parentPublication": { "id": "proceedings/cvprw/2006/2646/0", "title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/10/ttp2008101831", "title": "Robust and Accurate Visual Echo Cancelation in a Full-duplex Projector-Camera System", 
"doi": null, "abstractUrl": "/journal/tp/2008/10/ttp2008101831/13rRUxjQyip", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cost/2022/6248/0/624800a180", "title": "Review of Photometric Compensation in Projection System", "doi": null, "abstractUrl": "/proceedings-article/cost/2022/624800a180/1H2pilDW8ww", "parentPublication": { "id": "proceedings/cost/2022/6248/0", "title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a135", "title": "CompenHR: Efficient Full Compensation for High-resolution Projector", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a135/1MNgmceltOU", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300g803", "title": "End-To-End Projector Photometric Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300g803/1gyraGpeGyY", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09318552", "title": "End-to-End Full Projector Compensation", "doi": null, "abstractUrl": "/journal/tp/2022/06/09318552/1qdT3YKBd5u", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382896", "title": "DeProCams: Simultaneous Relighting, 
Compensation and Shape Reconstruction for Projector-Camera Systems", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382896/1saZvVKgpFK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqJq4iJ", "title": "2015 Big Data Visual Analytics (BDVA)", "acronym": "bdva", "groupId": "1809805", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNylKAXL", "doi": "10.1109/BDVA.2015.7314287", "title": "Big Text Visual Analytics in Sensemaking", "normalizedTitle": "Big Text Visual Analytics in Sensemaking", "abstract": "Learning from text data often involves a loop of tasks that iterate between foraging for information and synthesizing it in incremental hypotheses. Past research has shown the advantages of using spatial workspaces as a means for synthesizing information through externalizing hypotheses and creating spatial schemas. However, spatializing the entirety of datasets becomes prohibitive as the number of documents available to the analysts grows, particularly when only a small subset are relevant to the tasks at hand. To address this issue, we applied the multi-model semantic interaction (MSI) technique, which leverages user interactions to aid in the display layout (as was seen in previous semantic interaction work), forage for new, relevant documents as implied by the interactions, and place them in context of the user's existing spatial layout. Thus, this approach cleanly embeds visual analytics of big text collections directly into the human sensemaking process.", "abstracts": [ { "abstractType": "Regular", "content": "Learning from text data often involves a loop of tasks that iterate between foraging for information and synthesizing it in incremental hypotheses. Past research has shown the advantages of using spatial workspaces as a means for synthesizing information through externalizing hypotheses and creating spatial schemas. However, spatializing the entirety of datasets becomes prohibitive as the number of documents available to the analysts grows, particularly when only a small subset are relevant to the tasks at hand. 
To address this issue, we applied the multi-model semantic interaction (MSI) technique, which leverages user interactions to aid in the display layout (as was seen in previous semantic interaction work), forage for new, relevant documents as implied by the interactions, and place them in context of the user's existing spatial layout. Thus, this approach cleanly embeds visual analytics of big text collections directly into the human sensemaking process.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Learning from text data often involves a loop of tasks that iterate between foraging for information and synthesizing it in incremental hypotheses. Past research has shown the advantages of using spatial workspaces as a means for synthesizing information through externalizing hypotheses and creating spatial schemas. However, spatializing the entirety of datasets becomes prohibitive as the number of documents available to the analysts grows, particularly when only a small subset are relevant to the tasks at hand. To address this issue, we applied the multi-model semantic interaction (MSI) technique, which leverages user interactions to aid in the display layout (as was seen in previous semantic interaction work), forage for new, relevant documents as implied by the interactions, and place them in context of the user's existing spatial layout. 
Thus, this approach cleanly embeds visual analytics of big text collections directly into the human sensemaking process.", "fno": "07314287", "keywords": [ "Big Data", "Data Visualisation", "Human Computer Interaction", "Learning Artificial Intelligence", "Text Analysis", "Human Sensemaking Process", "Big Text Collection", "MSI Technique", "Multimodel Semantic Interaction Technique", "Learning", "Big Text Visual Analytics", "Information Retrieval", "Semantics", "Layout", "Context", "Visualization", "Encoding", "IEEE Xplore" ], "authors": [ { "affiliation": null, "fullName": "Lauren Bradel", "givenName": "Lauren", "surname": "Bradel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nathan Wycoff", "givenName": "Nathan", "surname": "Wycoff", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Leanna House", "givenName": "Leanna", "surname": "House", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chris North", "givenName": "Chris", "surname": "North", "__typename": "ArticleAuthorType" } ], "idPrefix": "bdva", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2015", "issn": null, "isbn": "978-1-4673-7343-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07314286", "articleId": "12OmNybfqVl", "__typename": "AdjacentArticleType" }, "next": { "fno": "07314288", "articleId": "12OmNButq1p", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/big-data/2014/5666/0/07004262", "title": "Evaluating density-based motion for big data visual analytics", "doi": null, "abstractUrl": "/proceedings-article/big-data/2014/07004262/12OmNB0nWeq", "parentPublication": { "id": "proceedings/big-data/2014/5666/0", "title": "2014 IEEE International 
Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2014/6227/0/07042492", "title": "Multi-model semantic interaction for text analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2014/07042492/12OmNBkP3EP", "parentPublication": { "id": "proceedings/vast/2014/6227/0", "title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851c479", "title": "Combining Markov Random Fields and Convolutional Neural Networks for Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851c479/12OmNx9FhLO", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wvl/1989/2002/0/00077050", "title": "The specification of visual language syntax", "doi": null, "abstractUrl": "/proceedings-article/wvl/1989/00077050/12OmNxveNOM", "parentPublication": { "id": "proceedings/wvl/1989/2002/0", "title": "1989 IEEE Workshop on Visual Languages", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2013/1293/0/06691787", "title": "Tile based visual analytics for Twitter big data exploratory analysis", "doi": null, "abstractUrl": "/proceedings-article/big-data/2013/06691787/12OmNzVXO0S", "parentPublication": { "id": "proceedings/big-data/2013/1293/0", "title": "2013 IEEE International Conference on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2013/04/mcs2013040066", "title": "Visual Document Retrieval: Supporting Text Search and Analysis with Visual Analytics", "doi": null, "abstractUrl": 
"/magazine/cs/2013/04/mcs2013040066/13rRUx0xPPu", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/04/ttp2009040591", "title": "Efficient Visual Search of Videos Cast as Text Retrieval", "doi": null, "abstractUrl": "/journal/tp/2009/04/ttp2009040591/13rRUxZzAiF", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2018/6861/0/08802424", "title": "The Effect of Semantic Interaction on Foraging in Text Analysis", "doi": null, "abstractUrl": "/proceedings-article/vast/2018/08802424/1cJ6XAJz7gc", "parentPublication": { "id": "proceedings/vast/2018/6861/0", "title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/09/09321177", "title": "Fast 3D Indoor Scene Synthesis by Learning Spatial Relation Priors of Objects", "doi": null, "abstractUrl": "/journal/tg/2022/09/09321177/1qkwF6Uf61y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09427066", "title": "Learning Layout and Style Reconfigurable GANs for Controllable Image Synthesis", "doi": null, "abstractUrl": "/journal/tp/2022/09/09427066/1tuvzMfndhS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1JrQPhTSspy", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JrR13pPxBK", "doi": "10.1109/ISMAR55827.2022.00064", "title": "Evaluating the Benefits of Explicit and Semi-Automated Clusters for Immersive Sensemaking", "normalizedTitle": "Evaluating the Benefits of Explicit and Semi-Automated Clusters for Immersive Sensemaking", "abstract": "Immersive spaces have great potential to support analysts in complex sensemaking tasks, but the use of only manual interactions for organizing data elements can become tedious. We analyzed the user interactions to support cluster formation in an immersive sensemaking system, and we designed a semi-automated cluster creation technique that determines the user&#x2019;s intent to create a cluster based on object proximity. We present the results of a user study comparing this proximity-based technique with a manual clustering technique and a baseline immersive workspace with no explicit clustering support. We found that semi-automated clustering was faster and preferred, while manual clustering gave greater control to users. These results provide support for the approach of adding intelligent semantic interactions to aid the users of immersive analytics systems.", "abstracts": [ { "abstractType": "Regular", "content": "Immersive spaces have great potential to support analysts in complex sensemaking tasks, but the use of only manual interactions for organizing data elements can become tedious. We analyzed the user interactions to support cluster formation in an immersive sensemaking system, and we designed a semi-automated cluster creation technique that determines the user&#x2019;s intent to create a cluster based on object proximity. 
We present the results of a user study comparing this proximity-based technique with a manual clustering technique and a baseline immersive workspace with no explicit clustering support. We found that semi-automated clustering was faster and preferred, while manual clustering gave greater control to users. These results provide support for the approach of adding intelligent semantic interactions to aid the users of immersive analytics systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Immersive spaces have great potential to support analysts in complex sensemaking tasks, but the use of only manual interactions for organizing data elements can become tedious. We analyzed the user interactions to support cluster formation in an immersive sensemaking system, and we designed a semi-automated cluster creation technique that determines the user’s intent to create a cluster based on object proximity. We present the results of a user study comparing this proximity-based technique with a manual clustering technique and a baseline immersive workspace with no explicit clustering support. We found that semi-automated clustering was faster and preferred, while manual clustering gave greater control to users. 
These results provide support for the approach of adding intelligent semantic interactions to aid the users of immersive analytics systems.", "fno": "532500a479", "keywords": [ "Data Visualisation", "Pattern Clustering", "Virtual Reality", "Baseline Immersive Workspace", "Cluster Creation Technique", "Complex Sensemaking Tasks", "Data Elements", "Explicit Clustering Support", "Immersive Analytics Systems", "Immersive Sensemaking System", "Immersive Spaces", "Intelligent Semantic Interactions", "Manual Clustering Technique", "Manual Interactions", "Object Proximity", "Proximity Based Technique", "Semiautomated Clustering", "Support Analysts", "Support Cluster Formation", "User Interactions", "Semantics", "Manuals", "Task Analysis", "Artificial Intelligence", "Augmented Reality", "Virtual Reality", "Human AI Collaboration", "Semantic Interaction", "Immersive Analytics", "Clustering" ], "authors": [ { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Ibrahim A. Tahmid", "givenName": "Ibrahim A.", "surname": "Tahmid", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Lee Lisle", "givenName": "Lee", "surname": "Lisle", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Kylie Davidson", "givenName": "Kylie", "surname": "Davidson", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Chris North", "givenName": "Chris", "surname": "North", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Doug A. 
Bowman", "givenName": "Doug A.", "surname": "Bowman", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "479-488", "year": "2022", "issn": "1554-7868", "isbn": "978-1-6654-5325-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1JrR10bIkUg", "name": "pismar202253250-09995165s1-mm_532500a479.zip", "size": "162 kB", "location": "https://www.computer.org/csdl/api/v1/extra/pismar202253250-09995165s1-mm_532500a479.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "532500a470", "articleId": "1JrQZ2SKCuQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "532500a489", "articleId": "1JrQTvCTbhK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/eisic/2015/8657/0/8657a177", "title": "Guidelines for Sensemaking in Intelligence Analysis", "doi": null, "abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1", "parentPublication": { "id": "proceedings/eisic/2015/8657/0", "title": "2015 European Intelligence and Security Informatics Conference (EISIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07194834", "title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance", "doi": null, "abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/03/08698351", "title": "Immersive Analytics", "doi": null, "abstractUrl": "/magazine/cg/2019/03/08698351/19utOsQX9Nm", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894094", "title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a085", "title": "MEinVR: Multimodal Interaction Paradigms in Immersive Exploration", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a085/1J7W98ABKwM", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2022/1041/0/10051781", "title": "Developing Low-Cost Mobile Immersive System MA-VRIOT with Physical Activity Interactions by Integrating IOT Technology", "doi": null, "abstractUrl": "/proceedings-article/snpd/2022/10051781/1LiNWiuucSc", "parentPublication": { "id": "proceedings/snpd/2022/1041/0", "title": "2022 IEEE/ACIS 23rd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090546", "title": "[DC] The Immersive Space to Think: Immersive Analytics for Multimedia Data", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090546/1jIxrquhCNO", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vrw/2020/6532/0/09090620", "title": "Evaluating the Benefits of the Immersive Space to Think", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090620/1jIxs5S1PwY", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382916", "title": "Exploring the SenseMaking Process through Interactions and fNIRS in Immersive Visualization", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382916/1saZna718yY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a529", "title": "Sensemaking Strategies with Immersive Space to Think", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a529/1tuAMAuN6kU", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qRNrlo577W", "title": "2020 IEEE Visualization Conference (VIS)", "acronym": "vis", "groupId": "1001944", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qROdAHfNWU", "doi": "10.1109/VIS47514.2020.00045", "title": "CrowdTrace: Visualizing Provenance in Distributed Sensemaking", "normalizedTitle": "CrowdTrace: Visualizing Provenance in Distributed Sensemaking", "abstract": "Capturing analytic provenance is important for refining sensemaking analysis. However, understanding this provenance can be difficult. First, making sense of the reasoning in intermediate steps is time-consuming. Especially in distributed sensemaking, the provenance is less cohesive because each analyst only sees a small portion of the data without an understanding of the overall collaboration workflow. Second, analysis errors from one step can propagate to later steps. Furthermore, in exploratory sensemaking, it is difficult to define what an error is since there are no correct answers to reference. In this paper, we explore provenance analysis for distributed sense-making in the context of crowdsourcing, where distributed analysis contributions are captured in microtasks. We propose crowd auditing as a way to help individual analysts visualize and trace provenance to debug distributed sensemaking. To evaluate this concept, we implemented a crowd auditing tool, CrowdTrace. Our user study-based evaluation demonstrates that CrowdTrace offers an effective mechanism to audit and refine multi-step crowd sensemaking.", "abstracts": [ { "abstractType": "Regular", "content": "Capturing analytic provenance is important for refining sensemaking analysis. However, understanding this provenance can be difficult. First, making sense of the reasoning in intermediate steps is time-consuming. 
Especially in distributed sensemaking, the provenance is less cohesive because each analyst only sees a small portion of the data without an understanding of the overall collaboration workflow. Second, analysis errors from one step can propagate to later steps. Furthermore, in exploratory sensemaking, it is difficult to define what an error is since there are no correct answers to reference. In this paper, we explore provenance analysis for distributed sense-making in the context of crowdsourcing, where distributed analysis contributions are captured in microtasks. We propose crowd auditing as a way to help individual analysts visualize and trace provenance to debug distributed sensemaking. To evaluate this concept, we implemented a crowd auditing tool, CrowdTrace. Our user study-based evaluation demonstrates that CrowdTrace offers an effective mechanism to audit and refine multi-step crowd sensemaking.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Capturing analytic provenance is important for refining sensemaking analysis. However, understanding this provenance can be difficult. First, making sense of the reasoning in intermediate steps is time-consuming. Especially in distributed sensemaking, the provenance is less cohesive because each analyst only sees a small portion of the data without an understanding of the overall collaboration workflow. Second, analysis errors from one step can propagate to later steps. Furthermore, in exploratory sensemaking, it is difficult to define what an error is since there are no correct answers to reference. In this paper, we explore provenance analysis for distributed sense-making in the context of crowdsourcing, where distributed analysis contributions are captured in microtasks. We propose crowd auditing as a way to help individual analysts visualize and trace provenance to debug distributed sensemaking. To evaluate this concept, we implemented a crowd auditing tool, CrowdTrace. 
Our user study-based evaluation demonstrates that CrowdTrace offers an effective mechanism to audit and refine multi-step crowd sensemaking.", "fno": "801400a191", "keywords": [ "Data Analysis", "Data Visualisation", "Inference Mechanisms", "Crowd Auditing", "Reasoning", "Provenance Analysis", "Exploratory Sensemaking", "Sensemaking Analysis", "Distributed Sensemaking", "Provenance Visualization", "Crowd Trace", "Crowdsourcing", "Conferences", "Refining", "Distributed Databases", "Collaboration", "Tools", "Cognition", "Crowdsourcing", "Sensemaking", "Crowd Auditing" ], "authors": [ { "affiliation": "Loyola University Chicago", "fullName": "Tianyi Li", "givenName": "Tianyi", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech", "fullName": "Yasmine Belghith", "givenName": "Yasmine", "surname": "Belghith", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech", "fullName": "Chris North", "givenName": "Chris", "surname": "North", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech", "fullName": "Kurt Luther", "givenName": "Kurt", "surname": "Luther", "__typename": "ArticleAuthorType" } ], "idPrefix": "vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-10-01T00:00:00", "pubType": "proceedings", "pages": "191-195", "year": "2020", "issn": null, "isbn": "978-1-7281-8014-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1qROd2COZWM", "name": "pvis202080140-09331303s1-mm_801400a191.zip", "size": "19.5 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvis202080140-09331303s1-mm_801400a191.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "801400a186", "articleId": "1qROmg6Kdi0", "__typename": "AdjacentArticleType" }, "next": { "fno": "801400a196", "articleId": "1qROE8ebCZq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/vast/2016/5661/0/07883515", "title": "SenseMap: Supporting browser-based online sensemaking through analytic provenance", "doi": null, "abstractUrl": "/proceedings-article/vast/2016/07883515/12OmNz2kqj9", "parentPublication": { "id": "proceedings/vast/2016/5661/0", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eisic/2015/8657/0/8657a177", "title": "Guidelines for Sensemaking in Intelligence Analysis", "doi": null, "abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1", "parentPublication": { "id": "proceedings/eisic/2015/8657/0", "title": "2015 European Intelligence and Security Informatics Conference (EISIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2010/8957/0/05693917", "title": "Tracking and Sketching Distributed Data Provenance", "doi": null, "abstractUrl": "/proceedings-article/e-science/2010/05693917/12OmNzXnNCH", "parentPublication": { "id": "proceedings/e-science/2010/8957/0", "title": "E-Science 2010. 
6th IEEE International Conference on E-Science (E-Science 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/03/mcg2015030056", "title": "Analytic Provenance for Sensemaking: A Research Agenda", "doi": null, "abstractUrl": "/magazine/cg/2015/03/mcg2015030056/13rRUB7a13F", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07194834", "title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance", "doi": null, "abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2014/03/mco2014030038", "title": "Investigating Collaborative Sensemaking Behavior in Collaborative Information Seeking", "doi": null, "abstractUrl": "/magazine/co/2014/03/mco2014030038/13rRUyv53J7", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08500765", "title": "Enhancing Web-based Analytics Applications through Provenance", "doi": null, "abstractUrl": "/journal/tg/2019/01/08500765/17D45WYQJ6B", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585484", "title": "CRICTO: Supporting Sensemaking through Crowdsourced Information Schematization", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585484/17D45Wc1ILV", "parentPublication": { "id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894094", "title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382916", "title": "Exploring the SenseMaking Process through Interactions and fNIRS in Immersive Visualization", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382916/1saZna718yY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuAMAuN6kU", "doi": "10.1109/VR50410.2021.00077", "title": "Sensemaking Strategies with Immersive Space to Think", "normalizedTitle": "Sensemaking Strategies with Immersive Space to Think", "abstract": "The process of sensemaking involves foraging through and extracting information from large sets of documents, and it can be a cognitively intensive task. A recent approach, the Immersive Space to Think (IST), allows analysts to browse, read, mark up documents, and use immersive 3D space to organize and label collections of documents. In this study, we observed seventeen novice analysts perform a historical analysis task in order to understand how users utilize the features of IST to extract meaning from large text-based datasets. We found three different layout strategies they employed to create meaning with the documents we provided. We further found patterns of interaction and organization that can inform future improvements to the IST approach.", "abstracts": [ { "abstractType": "Regular", "content": "The process of sensemaking involves foraging through and extracting information from large sets of documents, and it can be a cognitively intensive task. A recent approach, the Immersive Space to Think (IST), allows analysts to browse, read, mark up documents, and use immersive 3D space to organize and label collections of documents. In this study, we observed seventeen novice analysts perform a historical analysis task in order to understand how users utilize the features of IST to extract meaning from large text-based datasets. We found three different layout strategies they employed to create meaning with the documents we provided. 
We further found patterns of interaction and organization that can inform future improvements to the IST approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The process of sensemaking involves foraging through and extracting information from large sets of documents, and it can be a cognitively intensive task. A recent approach, the Immersive Space to Think (IST), allows analysts to browse, read, mark up documents, and use immersive 3D space to organize and label collections of documents. In this study, we observed seventeen novice analysts perform a historical analysis task in order to understand how users utilize the features of IST to extract meaning from large text-based datasets. We found three different layout strategies they employed to create meaning with the documents we provided. We further found patterns of interaction and organization that can inform future improvements to the IST approach.", "fno": "255600a529", "keywords": [ "Data Visualisation", "Feature Extraction", "Human Computer Interaction", "Text Analysis", "Virtual Reality", "Sensemaking Strategies", "Immersive Space To Think", "Immersive 3 D Space", "IST Approach", "Information Extraction", "Document Labelling", "Feature Extraction", "Text Based Datasets", "Data Visualization", "Human Computer Interaction", "Virtual Text Documents", "Visualization", "Three Dimensional Displays", "Layout", "Virtual Reality", "Organizations", "User Interfaces", "Feature Extraction", "Human Centered Computing Visualization Visualization Techniques", "Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality" ], "authors": [ { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Lee Lisle", "givenName": "Lee", "surname": "Lisle", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Kylie 
Davidson", "givenName": "Kylie", "surname": "Davidson", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction,Department of History Virginia Tech", "fullName": "Edward J.K. Gitre", "givenName": "Edward J.K.", "surname": "Gitre", "__typename": "ArticleAuthorType" }, { "affiliation": "Sanghani Center,Department of Computer Science Virginia Tech", "fullName": "Chris North", "givenName": "Chris", "surname": "North", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction,Department of Computer Science Virginia Tech", "fullName": "Doug A. Bowman", "givenName": "Doug A.", "surname": "Bowman", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "529-537", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "255600a519", "articleId": "1tuBtNYt0LC", "__typename": "AdjacentArticleType" }, "next": { "fno": "255600a538", "articleId": "1tuAjEOnM8E", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vrw/2022/8402/0/840200a936", "title": "[DC] Immersive Analytics for Understanding Ecosystem Services Tradeoffs", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a936/1CJcFsf3SU0", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a363", "title": "QoE Study of Natural Interaction in Extended Reality Environment for Immersive Training", "doi": null, "abstractUrl": 
"/proceedings-article/vrw/2022/840200a363/1CJenCw9Wmc", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09894094", "title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think", "doi": null, "abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a479", "title": "Evaluating the Benefits of Explicit and Semi-Automated Clusters for Immersive Sensemaking", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a479/1JrR13pPxBK", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090546", "title": "[DC] The Immersive Space to Think: Immersive Analytics for Multimedia Data", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090546/1jIxrquhCNO", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090620", "title": "Evaluating the Benefits of the Immersive Space to Think", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090620/1jIxs5S1PwY", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382916", "title": "Exploring the SenseMaking Process through Interactions and fNIRS in Immersive Visualization", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382916/1saZna718yY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a633", "title": "Immersive Authoring of Virtual Reality Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a633/1tnXNG6t1x6", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600z023", "title": "Self-Avatars in Immersive Technology", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600z023/1tuAsCE62Tm", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a181", "title": "Narrative Sensemaking: Strategies for Narrative Maps Construction", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "14jQfMYohco", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "14jQfPGqOcx", "doi": "10.1109/ICME.2018.8486606", "title": "CUB360: Exploiting Cross-Users Behaviors for Viewport Prediction in 360 Video Adaptive Streaming", "normalizedTitle": "CUB360: Exploiting Cross-Users Behaviors for Viewport Prediction in 360 Video Adaptive Streaming", "abstract": "To ensure 360-degree video's continuous playback and reduce the bandwidth waste, predicting user's future fixation is indispensable. However, existing methods concentrate either on user's motion information or content information. None of them consider users watching behaviors' inconsistency which embodies user's attention distribution more explicitly. So in this paper, we exploit Cross-Users Behaviors for viewport prediction in 360-degree video adaptive streaming, namely CUB360, trying to concurrently consider user's personalized information and cross-users behaviors information to predict future viewport. Besides, we use a QoE-driven framework to optimize existing video streaming approaches and propose a general algorithm aiming at solving the NP problem at a low complexity. Extensive experimental results over real datasets demonstrate that compared with traditional adaptive streaming method, our proposal can significantly boost the prediction accuracy by 20.2% absolutely and 48.1 % relatively. Besides, the mean quality can get 30.28% gain while quality variance can be reduced by 29.89%.", "abstracts": [ { "abstractType": "Regular", "content": "To ensure 360-degree video's continuous playback and reduce the bandwidth waste, predicting user's future fixation is indispensable. However, existing methods concentrate either on user's motion information or content information. 
None of them consider users watching behaviors' inconsistency which embodies user's attention distribution more explicitly. So in this paper, we exploit Cross-Users Behaviors for viewport prediction in 360-degree video adaptive streaming, namely CUB360, trying to concurrently consider user's personalized information and cross-users behaviors information to predict future viewport. Besides, we use a QoE-driven framework to optimize existing video streaming approaches and propose a general algorithm aiming at solving the NP problem at a low complexity. Extensive experimental results over real datasets demonstrate that compared with traditional adaptive streaming method, our proposal can significantly boost the prediction accuracy by 20.2% absolutely and 48.1 % relatively. Besides, the mean quality can get 30.28% gain while quality variance can be reduced by 29.89%.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To ensure 360-degree video's continuous playback and reduce the bandwidth waste, predicting user's future fixation is indispensable. However, existing methods concentrate either on user's motion information or content information. None of them consider users watching behaviors' inconsistency which embodies user's attention distribution more explicitly. So in this paper, we exploit Cross-Users Behaviors for viewport prediction in 360-degree video adaptive streaming, namely CUB360, trying to concurrently consider user's personalized information and cross-users behaviors information to predict future viewport. Besides, we use a QoE-driven framework to optimize existing video streaming approaches and propose a general algorithm aiming at solving the NP problem at a low complexity. Extensive experimental results over real datasets demonstrate that compared with traditional adaptive streaming method, our proposal can significantly boost the prediction accuracy by 20.2% absolutely and 48.1 % relatively. 
Besides, the mean quality can get 30.28% gain while quality variance can be reduced by 29.89%.", "fno": "08486606", "keywords": [ "Streaming Media", "Prediction Algorithms", "Visualization", "Bit Rate", "Bandwidth", "Optimization", "Voting", "360 Degree Video", "Cross Users Behaviors", "Viewport Prediction", "Tile Based Adaptive Streaming", "Viewport Adaptive Streaming" ], "authors": [ { "affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China", "fullName": "Yixuan Ban", "givenName": "Yixuan", "surname": "Ban", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China", "fullName": "Lan Xie", "givenName": "Lan", "surname": "Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China", "fullName": "Zhimin Xu", "givenName": "Zhimin", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China", "fullName": "Xinggong Zhang", "givenName": "Xinggong", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China", "fullName": "Zongming Guo", "givenName": "Zongming", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing ByteDance Technology Co., Ltd", "fullName": "Yue Wang", "givenName": "Yue", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2018", "issn": null, "isbn": "978-1-5386-1737-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08486535", "articleId": "14jQfS9Icrp", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "08486454", "articleId": "14jQfO2OpDu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2017/2937/0/2937a038", "title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2018/4195/0/08551493", "title": "Tile-Based Qoe-Driven Http/2 Streaming System For 360 Video", "doi": null, "abstractUrl": "/proceedings-article/icmew/2018/08551493/17D45VTRooi", "parentPublication": { "id": "proceedings/icmew/2018/4195/0", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/lcn/2018/4413/0/08638092", "title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos", "doi": null, "abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i", "parentPublication": { "id": "proceedings/lcn/2018/4413/0", "title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2021/0668/0/066800a462", "title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction", "doi": null, "abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva", "parentPublication": { "id": "proceedings/msn/2021/0668/0", "title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859789", "title": "MFVP: 
Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifip-networking/2019/16/0/08999460", "title": "Advancing user quality of experience in 360-degree video streaming", "doi": null, "abstractUrl": "/proceedings-article/ifip-networking/2019/08999460/1hHLyJf1thC", "parentPublication": { "id": "proceedings/ifip-networking/2019/16/0", "title": "2019 IFIP Networking Conference (IFIP Networking)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/07/09261971", "title": "Online Bitrate Selection for Viewport Adaptive 360-Degree Video Streaming", "doi": null, "abstractUrl": "/journal/tm/2022/07/09261971/1oPzPzmWa9W", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", "title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2021/9101/0/09333964", "title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming", "doi": null, "abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc", "parentPublication": { "id": "proceedings/icoin/2021/9101/0", "title": "2021 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G9DtzCwrjW", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G9EA5cTE88", "doi": "10.1109/ICME52920.2022.9859789", "title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "normalizedTitle": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "abstract": "Viewport prediction is the crucial task for viewport-adaptive 360-degree video streaming. Various viewport prediction methods are studied and adopted from less accurate statistic tools to highly calibrated deep neural networks. Conventionally, it is difficult to implement sophisticated deep learning methods on mobile devices, which have limited computation capability. In this work, we propose an advanced learning-based viewport prediction approach and carefully design it to introduce minimal transmission and computation overhead for mobile terminals. We further discuss how to integrate this mobile-friendly viewport prediction (MFVP) approach into the adaptive 360-degree video live streaming by formulating and solving the bitrate adaptation problem. Extensive experiment results show that our prediction approach can work in real-time for live streaming and can achieve higher accuracies compared to other existing prediction methods on mobile clients, which, together with our proposed bitrate adaptation algorithm, significantly improves the streaming Quality-of-Experience (QoE) from various aspects.", "abstracts": [ { "abstractType": "Regular", "content": "Viewport prediction is the crucial task for viewport-adaptive 360-degree video streaming. Various viewport prediction methods are studied and adopted from less accurate statistic tools to highly calibrated deep neural networks. 
Conventionally, it is difficult to implement sophisticated deep learning methods on mobile devices, which have limited computation capability. In this work, we propose an advanced learning-based viewport prediction approach and carefully design it to introduce minimal transmission and computation overhead for mobile terminals. We further discuss how to integrate this mobile-friendly viewport prediction (MFVP) approach into the adaptive 360-degree video live streaming by formulating and solving the bitrate adaptation problem. Extensive experiment results show that our prediction approach can work in real-time for live streaming and can achieve higher accuracies compared to other existing prediction methods on mobile clients, which, together with our proposed bitrate adaptation algorithm, significantly improves the streaming Quality-of-Experience (QoE) from various aspects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Viewport prediction is the crucial task for viewport-adaptive 360-degree video streaming. Various viewport prediction methods are studied and adopted from less accurate statistic tools to highly calibrated deep neural networks. Conventionally, it is difficult to implement sophisticated deep learning methods on mobile devices, which have limited computation capability. In this work, we propose an advanced learning-based viewport prediction approach and carefully design it to introduce minimal transmission and computation overhead for mobile terminals. We further discuss how to integrate this mobile-friendly viewport prediction (MFVP) approach into the adaptive 360-degree video live streaming by formulating and solving the bitrate adaptation problem. 
Extensive experiment results show that our prediction approach can work in real-time for live streaming and can achieve higher accuracies compared to other existing prediction methods on mobile clients, which, together with our proposed bitrate adaptation algorithm, significantly improves the streaming Quality-of-Experience (QoE) from various aspects.", "fno": "09859789", "keywords": [ "Industrial Robots", "Learning Artificial Intelligence", "Mobile Computing", "Mobile Robots", "Neural Nets", "Telerobotics", "Video Streaming", "Advanced Learning Based Viewport Prediction Approach", "Minimal Transmission", "Computation Overhead", "Mobile Terminals", "Mobile Friendly Viewport Prediction Approach", "MFVP", "360 Degree Video Live Streaming", "Bitrate Adaptation Problem", "Existing Prediction Methods", "Mobile Clients", "Bitrate Adaptation Algorithm", "Video Streaming", "Viewport Adaptive 360 Degree Video", "Viewport Prediction Methods", "Accurate Statistic Tools", "Deep Neural Networks", "Sophisticated Deep Learning Methods", "Mobile Devices", "Computation Capability", "Deep Learning", "Bit Rate", "Neural Networks", "Streaming Media", "Predictive Models", "Prediction Algorithms", "Real Time Systems", "Viewport Prediction", "Mobile", "Live Streaming", "360 Degree Video" ], "authors": [ { "affiliation": "Shenzhen University,China", "fullName": "Lei Zhang", "givenName": "Lei", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Shenzhen University,China", "fullName": "Weizhen Xu", "givenName": "Weizhen", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Tencent,China", "fullName": "Donghuan Lu", "givenName": "Donghuan", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "Shenzhen University,China", "fullName": "Laizhong Cui", "givenName": "Laizhong", "surname": "Cui", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University,Canada", "fullName": "Jiangchuan Liu", "givenName": 
"Jiangchuan", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": null, "isbn": "978-1-6654-8563-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859581", "articleId": "1G9DQkVPyXS", "__typename": "AdjacentArticleType" }, "next": { "fno": "09859858", "articleId": "1G9EfQYzPhK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2017/2937/0/2937a038", "title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2016/4571/0/4571a583", "title": "Viewport-Adaptive Encoding and Streaming of 360-Degree Video for Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ism/2016/4571a583/12OmNzsJ7Ig", "parentPublication": { "id": "proceedings/ism/2016/4571/0", "title": "2016 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486606", "title": "CUB360: Exploiting Cross-Users Behaviors for Viewport Prediction in 360 Video Adaptive Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486606/14jQfPGqOcx", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/lcn/2018/4413/0/08638092", "title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos", "doi": null, "abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i", "parentPublication": { "id": "proceedings/lcn/2018/4413/0", "title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2021/0668/0/066800a462", "title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction", "doi": null, "abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva", "parentPublication": { "id": "proceedings/msn/2021/0668/0", "title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2022/9792/0/09913007", "title": "Deep Reinforcement Learning Based Adaptive 360-degree Video Streaming with Field of View Joint Prediction", "doi": null, "abstractUrl": "/proceedings-article/iscc/2022/09913007/1HBK3Mimize", "parentPublication": { "id": "proceedings/iscc/2022/9792/0", "title": "2022 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a384", "title": "OPV: Bias Correction Based Optimal Probabilistic Viewport-Adaptive Streaming for 360-Degree Video", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a384/1cJ0yrI3NHa", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/07/09261971", "title": "Online Bitrate Selection for Viewport Adaptive 360-Degree Video Streaming", "doi": null, "abstractUrl": 
"/journal/tm/2022/07/09261971/1oPzPzmWa9W", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", "title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G9DtzCwrjW", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G9EwWVBvuo", "doi": "10.1109/ICME52920.2022.9859963", "title": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360&#x00B0; Live Streaming", "normalizedTitle": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360° Live Streaming", "abstract": "The ever-increasing demand for bandwidth resources when delivering premium quality 360&#x00B0; video challenges the current network capacity. To alleviate such bandwidth pressure, it is imperative to predict the viewport via observing the content visual feature and historical viewing behaviors, which thereby allows the system to concentrate the limited resource on viewer&#x0027;s region of interest in 360&#x00B0; content. However, enabling accurate viewport prediction for 360&#x00B0; live streaming is non-trivial given the time-sensitive of live content and shortage of pre-knowledge on the visual features and viewing behaviors. In this paper, we propose CoLive, an edge-assisted online viewport prediction framework. CoLive incorporates edge computing to offload the prediction model training from viewers and migrates the saliency feature detection to the server side for reducing the processing delay. Viewers can also collaboratively train a central predicting model via sharing their loss gradients. This central model, together with the saliency feature detection, further prompts accuracy prediction and learning acceleration, especially for new incoming viewers. 
A series of experiments on the public 360&#x00B0; video dataset show how our solution achieves better performance compared with state-of-the-art solutions.", "abstracts": [ { "abstractType": "Regular", "content": "The ever-increasing demand for bandwidth resources when delivering premium quality 360&#x00B0; video challenges the current network capacity. To alleviate such bandwidth pressure, it is imperative to predict the viewport via observing the content visual feature and historical viewing behaviors, which thereby allows the system to concentrate the limited resource on viewer&#x0027;s region of interest in 360&#x00B0; content. However, enabling accurate viewport prediction for 360&#x00B0; live streaming is non-trivial given the time-sensitive of live content and shortage of pre-knowledge on the visual features and viewing behaviors. In this paper, we propose CoLive, an edge-assisted online viewport prediction framework. CoLive incorporates edge computing to offload the prediction model training from viewers and migrates the saliency feature detection to the server side for reducing the processing delay. Viewers can also collaboratively train a central predicting model via sharing their loss gradients. This central model, together with the saliency feature detection, further prompts accuracy prediction and learning acceleration, especially for new incoming viewers. A series of experiments on the public 360&#x00B0; video dataset show how our solution achieves better performance compared with state-of-the-art solutions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The ever-increasing demand for bandwidth resources when delivering premium quality 360° video challenges the current network capacity. 
To alleviate such bandwidth pressure, it is imperative to predict the viewport via observing the content visual feature and historical viewing behaviors, which thereby allows the system to concentrate the limited resource on viewer's region of interest in 360° content. However, enabling accurate viewport prediction for 360° live streaming is non-trivial given the time-sensitive of live content and shortage of pre-knowledge on the visual features and viewing behaviors. In this paper, we propose CoLive, an edge-assisted online viewport prediction framework. CoLive incorporates edge computing to offload the prediction model training from viewers and migrates the saliency feature detection to the server side for reducing the processing delay. Viewers can also collaboratively train a central predicting model via sharing their loss gradients. This central model, together with the saliency feature detection, further prompts accuracy prediction and learning acceleration, especially for new incoming viewers. 
A series of experiments on the public 360° video dataset show how our solution achieves better performance compared with state-of-the-art solutions.", "fno": "09859963", "keywords": [ "Distributed Processing", "Feature Extraction", "Learning Artificial Intelligence", "Neural Nets", "Video Signal Processing", "Video Streaming", "Co Live", "Edge Assisted Online Learning Framework", "360 X 00 B 0 Live Streaming", "Bandwidth Resources", "Premium Quality 360 X 00 B 0 Video", "Current Network Capacity", "Bandwidth Pressure", "Content Visual Feature", "Historical Viewing Behaviors", "Live Content", "Visual Features", "Edge Assisted Online Viewport Prediction Framework", "Saliency Feature Detection", "Central Predicting Model", "Learning Acceleration", "Public 360 X 00 B 0 Video Dataset", "Visualization", "Computational Modeling", "Image Edge Detection", "Feature Detection", "Bandwidth", "Streaming Media", "Predictive Models", "Live 360 X 00 B 0 Video", "Viewport Prediction", "Online Learning", "Edge Computing" ], "authors": [ { "affiliation": "Tsinghua University,Beijing,China", "fullName": "Mu Wang", "givenName": "Mu", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Southwestern University of Finance and Economics,Chengdu,China", "fullName": "Shuai Peng", "givenName": "Shuai", "surname": "Peng", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing University of Posts and Telecommunications,Beijing,China", "fullName": "Xingyan Chen", "givenName": "Xingyan", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing University of Posts and Telecommunications,Beijing,China", "fullName": "Yu Zhao", "givenName": "Yu", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "Tsinghua University,Beijing,China", "fullName": "Mingwei Xu", "givenName": "Mingwei", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Southwestern University of Finance and Economics,Chengdu,China", 
"fullName": "Changqiao Xu", "givenName": "Changqiao", "surname": "Xu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": null, "isbn": "978-1-6654-8563-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859783", "articleId": "1G9DXVlHFWU", "__typename": "AdjacentArticleType" }, "next": { "fno": "09859971", "articleId": "1G9Ey35T78Y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmew/2018/4195/0/08551577", "title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360&#x00B0; Video Network Multicast", "doi": null, "abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db", "parentPublication": { "id": "proceedings/icmew/2018/4195/0", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859789", "title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iisa/2022/6390/0/09904420", "title": "Subtitle-based Viewport Prediction for 360-degree Virtual Tourism Video", "doi": null, "abstractUrl": "/proceedings-article/iisa/2022/09904420/1H5KpY37ODe", "parentPublication": { "id": "proceedings/iisa/2022/6390/0", "title": "2022 13th International Conference on Information, Intelligence, Systems & Applications 
(IISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a296", "title": "Content-Aware Perspective Projection Optimization for Viewport Rendering of 360&#x00B0; Images", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a296/1cdOTlMdEYw", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2019/5604/0/560400a074", "title": "Viewport Forecasting in 360&#x00B0; Virtual Reality Videos with Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/aivr/2019/560400a074/1grOlOCkPuM", "parentPublication": { "id": "proceedings/aivr/2019/5604/0", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0169", "title": "Viewport Proposal CNN for 360&#x00B0; Video Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/04/09212608", "title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360&#x00B0; Video Quality", "doi": null, "abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloud/2020/8780/0/878000a337", "title": "Allies: Tile-Based Joint Transcoding, Delivery and Caching of 360&#x00B0; Videos in 
Edge Cloud Networks", "doi": null, "abstractUrl": "/proceedings-article/cloud/2020/878000a337/1pF6lo64jOo", "parentPublication": { "id": "proceedings/cloud/2020/8780/0", "title": "2020 IEEE 13th International Conference on Cloud Computing (CLOUD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a082", "title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgV4ZxSQ8", "doi": "10.1109/VR55154.2023.00033", "title": "CaV3: Cache-assisted Viewport Adaptive Volumetric Video Streaming", "normalizedTitle": "CaV3: Cache-assisted Viewport Adaptive Volumetric Video Streaming", "abstract": "Volumetric video (VV) recently emerges as a new form of video application providing a photorealistic immersive 3D viewing experience with 6 degree-of-freedom (DoF), which empowers many applications such as VR, AR, and Metaverse. A key problem therein is how to stream the enormous size VV through the network with limited bandwidth. Existing works mostly focused on predicting the viewport for a tiling-based adaptive VV streaming, which however only has quite a limited effect on resource saving. We argue that the content repeatability in the viewport can be further leveraged, and for the first time, propose a client-side cache-assisted strategy that aims to buffer the repeatedly appearing VV tiles in the near future so as to reduce the redundant VV content transmission. The key challenges exist in three aspects, including (1) feature extraction and mining in 6 DoF VV context, (2) accurate long-term viewing pattern estimation and (3) optimal caching scheduling with limited capacity. In this paper, we propose CaV3, an integrated cache-assisted viewport adaptive VV streaming framework to address the challenges. CaV3 employs a Long-short term Sequential prediction model (LSTSP) that achieves accurate short-term, mid-term and long-term viewing pattern prediction with a multi-modal fusion model by capturing the viewer&#x0027;s behavior inertia, current attention, and subjective intention. 
Besides, CaV3 also contains a contextual MAB-based caching adaptation algorithm (CCA) to fully utilize the viewing pattern and solve the optimal caching problem with a proved upper bound regret. Compared to existing VV datasets only containing single or co-located objects, we for the first time collect a comprehensive dataset with sufficient practical unbounded 360&#x00B0; scenes. The extensive evaluation of the dataset confirms the superiority of CaV3, which outperforms the SOTA algorithm by 15.6&#x0025;-43&#x0025; in viewport prediction and 13&#x0025;-40&#x0025; in system utility.", "abstracts": [ { "abstractType": "Regular", "content": "Volumetric video (VV) recently emerges as a new form of video application providing a photorealistic immersive 3D viewing experience with 6 degree-of-freedom (DoF), which empowers many applications such as VR, AR, and Metaverse. A key problem therein is how to stream the enormous size VV through the network with limited bandwidth. Existing works mostly focused on predicting the viewport for a tiling-based adaptive VV streaming, which however only has quite a limited effect on resource saving. We argue that the content repeatability in the viewport can be further leveraged, and for the first time, propose a client-side cache-assisted strategy that aims to buffer the repeatedly appearing VV tiles in the near future so as to reduce the redundant VV content transmission. The key challenges exist in three aspects, including (1) feature extraction and mining in 6 DoF VV context, (2) accurate long-term viewing pattern estimation and (3) optimal caching scheduling with limited capacity. In this paper, we propose CaV3, an integrated cache-assisted viewport adaptive VV streaming framework to address the challenges. 
CaV3 employs a Long-short term Sequential prediction model (LSTSP) that achieves accurate short-term, mid-term and long-term viewing pattern prediction with a multi-modal fusion model by capturing the viewer&#x0027;s behavior inertia, current attention, and subjective intention. Besides, CaV3 also contains a contextual MAB-based caching adaptation algorithm (CCA) to fully utilize the viewing pattern and solve the optimal caching problem with a proved upper bound regret. Compared to existing VV datasets only containing single or co-located objects, we for the first time collect a comprehensive dataset with sufficient practical unbounded 360&#x00B0; scenes. The extensive evaluation of the dataset confirms the superiority of CaV3, which outperforms the SOTA algorithm by 15.6&#x0025;-43&#x0025; in viewport prediction and 13&#x0025;-40&#x0025; in system utility.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Volumetric video (VV) recently emerges as a new form of video application providing a photorealistic immersive 3D viewing experience with 6 degree-of-freedom (DoF), which empowers many applications such as VR, AR, and Metaverse. A key problem therein is how to stream the enormous size VV through the network with limited bandwidth. Existing works mostly focused on predicting the viewport for a tiling-based adaptive VV streaming, which however only has quite a limited effect on resource saving. We argue that the content repeatability in the viewport can be further leveraged, and for the first time, propose a client-side cache-assisted strategy that aims to buffer the repeatedly appearing VV tiles in the near future so as to reduce the redundant VV content transmission. The key challenges exist in three aspects, including (1) feature extraction and mining in 6 DoF VV context, (2) accurate long-term viewing pattern estimation and (3) optimal caching scheduling with limited capacity. 
In this paper, we propose CaV3, an integrated cache-assisted viewport adaptive VV streaming framework to address the challenges. CaV3 employs a Long-short term Sequential prediction model (LSTSP) that achieves accurate short-term, mid-term and long-term viewing pattern prediction with a multi-modal fusion model by capturing the viewer's behavior inertia, current attention, and subjective intention. Besides, CaV3 also contains a contextual MAB-based caching adaptation algorithm (CCA) to fully utilize the viewing pattern and solve the optimal caching problem with a proved upper bound regret. Compared to existing VV datasets only containing single or co-located objects, we for the first time collect a comprehensive dataset with sufficient practical unbounded 360° scenes. The extensive evaluation of the dataset confirms the superiority of CaV3, which outperforms the SOTA algorithm by 15.6%-43% in viewport prediction and 13%-40% in system utility.", "fno": "481500a173", "keywords": [ "Adaptation Models", "Solid Modeling", "Three Dimensional Displays", "Upper Bound", "Virtual Reality", "Streaming Media", "Predictive Models" ], "authors": [ { "affiliation": "The Future Network of Intelligence Institute, The Chinese University of Hong Kong,Shenzhen", "fullName": "Junhua Liu", "givenName": "Junhua", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "The Future Network of Intelligence Institute, The Chinese University of Hong Kong,Shenzhen", "fullName": "Boxiang Zhu", "givenName": "Boxiang", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Science and Engineering, The Chinese University of Hong Kong,Shenzhen", "fullName": "Fangxin Wang", "givenName": "Fangxin", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "The Future Network of Intelligence Institute, The Chinese University of Hong Kong,Shenzhen", "fullName": "Yili Jin", "givenName": "Yili", "surname": "Jin", "__typename": "ArticleAuthorType" 
}, { "affiliation": "The Future Network of Intelligence Institute, The Chinese University of Hong Kong,Shenzhen", "fullName": "Wenyi Zhang", "givenName": "Wenyi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "The Future Network of Intelligence Institute, The Chinese University of Hong Kong,Shenzhen", "fullName": "Zihan Xu", "givenName": "Zihan", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Science and Engineering, The Chinese University of Hong Kong,Shenzhen", "fullName": "Shuguang Cui", "givenName": "Shuguang", "surname": "Cui", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "173-183", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1MNgV1lSMqQ", "name": "pvr202348150-010108421s1-mm_481500a173.zip", "size": "18 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108421s1-mm_481500a173.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "481500a163", "articleId": "1MNgZpTDDFu", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500a184", "articleId": "1MNgRf6yufS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2017/2937/0/2937a038", "title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a157", "title": "Trajectory-Based Viewport 
Prediction for 360-Degree Virtual Reality Videos", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a157/17D45WZZ7Fb", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2021/0668/0/066800a462", "title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction", "doi": null, "abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva", "parentPublication": { "id": "proceedings/msn/2021/0668/0", "title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859789", "title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859963", "title": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360&#x00B0; Live Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859963/1G9EwWVBvuo", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a384", "title": "OPV: Bias Correction Based Optimal Probabilistic Viewport-Adaptive Streaming for 360-Degree Video", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a384/1cJ0yrI3NHa", 
"parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089486", "title": "LiveDeep: Online Viewport Prediction for Live Virtual Reality Streaming Using Lifelong Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089486/1jIx7O3auI0", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2021/9101/0/09333964", "title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming", "doi": null, "abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc", "parentPublication": { "id": "proceedings/icoin/2021/9101/0", "title": "2021 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09393620", "title": "LiveObj: Object Semantics-based Viewport Prediction for Live Mobile Virtual Reality Streaming", "doi": null, "abstractUrl": "/journal/tg/2021/05/09393620/1srMExDcXcY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1grOiRpGmv6", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1grOj3Q3Hwc", "doi": "10.1109/AIVR46125.2019.00038", "title": "Exploring CNN-Based Viewport Prediction for Live Virtual Reality Streaming", "normalizedTitle": "Exploring CNN-Based Viewport Prediction for Live Virtual Reality Streaming", "abstract": "Live virtual reality streaming (a.k.a., 360-degree video streaming) is gaining popularity recently with its rapid growth in the consumer market. However, the huge bandwidth required by delivering the 360-degree frames becomes the bottleneck, keeping this application from a wider range of deployment. Research efforts have been carried out to solve the bandwidth problem by predicting the user's viewport of interest and selectively streaming a part of the whole frame. However, currently most of the viewport prediction approaches cannot address the unique challenges in the live streaming scenario, where there is no historical user or video traces to build the prediction model. In this paper, we explore the opportunity of leveraging convolutional neural network (CNN) to predict the user's viewport in live streaming by modifying the workflow of the CNN application and the training/testing process. The evaluation results reveal that the CNN-based method could achieve a high prediction accuracy with low bandwidth usage and low timing overhead.", "abstracts": [ { "abstractType": "Regular", "content": "Live virtual reality streaming (a.k.a., 360-degree video streaming) is gaining popularity recently with its rapid growth in the consumer market. However, the huge bandwidth required by delivering the 360-degree frames becomes the bottleneck, keeping this application from a wider range of deployment. 
Research efforts have been carried out to solve the bandwidth problem by predicting the user's viewport of interest and selectively streaming a part of the whole frame. However, currently most of the viewport prediction approaches cannot address the unique challenges in the live streaming scenario, where there is no historical user or video traces to build the prediction model. In this paper, we explore the opportunity of leveraging convolutional neural network (CNN) to predict the user's viewport in live streaming by modifying the workflow of the CNN application and the training/testing process. The evaluation results reveal that the CNN-based method could achieve a high prediction accuracy with low bandwidth usage and low timing overhead.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Live virtual reality streaming (a.k.a., 360-degree video streaming) is gaining popularity recently with its rapid growth in the consumer market. However, the huge bandwidth required by delivering the 360-degree frames becomes the bottleneck, keeping this application from a wider range of deployment. Research efforts have been carried out to solve the bandwidth problem by predicting the user's viewport of interest and selectively streaming a part of the whole frame. However, currently most of the viewport prediction approaches cannot address the unique challenges in the live streaming scenario, where there is no historical user or video traces to build the prediction model. In this paper, we explore the opportunity of leveraging convolutional neural network (CNN) to predict the user's viewport in live streaming by modifying the workflow of the CNN application and the training/testing process. 
The evaluation results reveal that the CNN-based method could achieve a high prediction accuracy with low bandwidth usage and low timing overhead.", "fno": "560400a183", "keywords": [ "CNN", "Viewport Prediction", "Live VR Streaming" ], "authors": [ { "affiliation": "Rutgers University", "fullName": "Xianglong Feng", "givenName": "Xianglong", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "Rutgers University", "fullName": "Zeyang Bao", "givenName": "Zeyang", "surname": "Bao", "__typename": "ArticleAuthorType" }, { "affiliation": "Rutgers University", "fullName": "Sheng Wei", "givenName": "Sheng", "surname": "Wei", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-12-01T00:00:00", "pubType": "proceedings", "pages": "183-1833", "year": "2019", "issn": null, "isbn": "978-1-7281-5604-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "560400a179", "articleId": "1grOjazBDR6", "__typename": "AdjacentArticleType" }, "next": { "fno": "560400a187", "articleId": "1grOjnCAqyY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2017/2937/0/2937a038", "title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2021/0668/0/066800a462", "title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction", "doi": null, "abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva", "parentPublication": { "id": 
"proceedings/msn/2021/0668/0", "title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859789", "title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859963", "title": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360&#x00B0; Live Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859963/1G9EwWVBvuo", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a324", "title": "VAS360: QoE-Driven Viewport Adaptive Streaming for 360 Video", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a324/1cJ0BSNq6FW", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0169", "title": "Viewport Proposal CNN for 360&#x00B0; Video Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2020/5608/0/09089486", "title": "LiveDeep: Online Viewport Prediction for Live Virtual Reality Streaming Using Lifelong Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089486/1jIx7O3auI0", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2020/8697/0/869700a085", "title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC", "doi": null, "abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG", "parentPublication": { "id": "proceedings/ism/2020/8697/0", "title": "2020 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2021/9101/0/09333964", "title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming", "doi": null, "abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc", "parentPublication": { "id": "proceedings/icoin/2021/9101/0", "title": "2021 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09393620", "title": "LiveObj: Object Semantics-based Viewport Prediction for Live Mobile Virtual Reality Streaming", "doi": null, "abstractUrl": "/journal/tg/2021/05/09393620/1srMExDcXcY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIx7fmpQ9a", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIx7O3auI0", "doi": "10.1109/VR46266.2020.00104", "title": "LiveDeep: Online Viewport Prediction for Live Virtual Reality Streaming Using Lifelong Deep Learning", "normalizedTitle": "LiveDeep: Online Viewport Prediction for Live Virtual Reality Streaming Using Lifelong Deep Learning", "abstract": "Live virtual reality (VR) streaming has become a popular and trending video application in the consumer market providing users with 360-degree, immersive viewing experiences. To provide premium quality of experience, VR streaming faces unique challenges due to the significantly increased bandwidth consumption. To address the bandwidth challenge, VR video viewport prediction has been proposed as a viable solution, which predicts and streams only the user&#x2019;s viewport of interest with high quality to the VR device. However, most of the existing viewport prediction approaches target only the video-on-demand (VOD) use cases, requiring offline processing of the historical video and/or user data that are not available in the live streaming scenario. In this work, we develop a novel viewport prediction approach for live VR streaming, which only requires video content and user data in the current viewing session. To address the challenges of insufficient training data and real-time processing, we propose a live VR-specific deep learning mechanism, namely LiveDeep, to create the online viewport prediction model and conduct real-time inference. 
LiveDeep employs a hybrid approach to address the unique challenges in live VR streaming, involving (1) an alternate online data collection, labeling, training, and inference schedule with controlled feedback loop to accommodate for the sparse training data; and (2) a mixture of hybrid neural network models to accommodate for the inaccuracy caused by a single model. We evaluate LiveDeep using 48 users and 14 VR videos of various types obtained from a public VR user head movement dataset. The results indicate around 90% prediction accuracy, around 40% bandwidth savings, and premium processing time, which meets the bandwidth and real-time requirements of live VR streaming.", "abstracts": [ { "abstractType": "Regular", "content": "Live virtual reality (VR) streaming has become a popular and trending video application in the consumer market providing users with 360-degree, immersive viewing experiences. To provide premium quality of experience, VR streaming faces unique challenges due to the significantly increased bandwidth consumption. To address the bandwidth challenge, VR video viewport prediction has been proposed as a viable solution, which predicts and streams only the user&#x2019;s viewport of interest with high quality to the VR device. However, most of the existing viewport prediction approaches target only the video-on-demand (VOD) use cases, requiring offline processing of the historical video and/or user data that are not available in the live streaming scenario. In this work, we develop a novel viewport prediction approach for live VR streaming, which only requires video content and user data in the current viewing session. To address the challenges of insufficient training data and real-time processing, we propose a live VR-specific deep learning mechanism, namely LiveDeep, to create the online viewport prediction model and conduct real-time inference. 
LiveDeep employs a hybrid approach to address the unique challenges in live VR streaming, involving (1) an alternate online data collection, labeling, training, and inference schedule with controlled feedback loop to accommodate for the sparse training data; and (2) a mixture of hybrid neural network models to accommodate for the inaccuracy caused by a single model. We evaluate LiveDeep using 48 users and 14 VR videos of various types obtained from a public VR user head movement dataset. The results indicate around 90% prediction accuracy, around 40% bandwidth savings, and premium processing time, which meets the bandwidth and real-time requirements of live VR streaming.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Live virtual reality (VR) streaming has become a popular and trending video application in the consumer market providing users with 360-degree, immersive viewing experiences. To provide premium quality of experience, VR streaming faces unique challenges due to the significantly increased bandwidth consumption. To address the bandwidth challenge, VR video viewport prediction has been proposed as a viable solution, which predicts and streams only the user’s viewport of interest with high quality to the VR device. However, most of the existing viewport prediction approaches target only the video-on-demand (VOD) use cases, requiring offline processing of the historical video and/or user data that are not available in the live streaming scenario. In this work, we develop a novel viewport prediction approach for live VR streaming, which only requires video content and user data in the current viewing session. To address the challenges of insufficient training data and real-time processing, we propose a live VR-specific deep learning mechanism, namely LiveDeep, to create the online viewport prediction model and conduct real-time inference. 
LiveDeep employs a hybrid approach to address the unique challenges in live VR streaming, involving (1) an alternate online data collection, labeling, training, and inference schedule with controlled feedback loop to accommodate for the sparse training data; and (2) a mixture of hybrid neural network models to accommodate for the inaccuracy caused by a single model. We evaluate LiveDeep using 48 users and 14 VR videos of various types obtained from a public VR user head movement dataset. The results indicate around 90% prediction accuracy, around 40% bandwidth savings, and premium processing time, which meets the bandwidth and real-time requirements of live VR streaming.", "fno": "09089486", "keywords": [ "Streaming Media", "Predictive Models", "Bandwidth", "Real Time Systems", "Data Models", "Machine Learning", "Virtual Reality", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality" ], "authors": [ { "affiliation": "Rutgers University", "fullName": "Xianglong Feng", "givenName": "Xianglong", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "SUNY Binghamton", "fullName": "Yao Liu", "givenName": "Yao", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Rutgers University", "fullName": "Sheng Wei", "givenName": "Sheng", "surname": "Wei", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "800-808", "year": "2020", "issn": null, "isbn": "978-1-7281-5608-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09089584", "articleId": "1jIx99YL1cI", "__typename": "AdjacentArticleType" }, "next": { "fno": "09089490", "articleId": "1jIxgqureDe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/icme/2018/1737/0/08486606", "title": "CUB360: Exploiting Cross-Users Behaviors for Viewport Prediction in 360 Video Adaptive Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486606/14jQfPGqOcx", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2021/0668/0/066800a462", "title": "Soft Actor-Critic Algorithm for 360-Degree Video Streaming with Long-Term Viewport Prediction", "doi": null, "abstractUrl": "/proceedings-article/msn/2021/066800a462/1CxzyBvoVva", "parentPublication": { "id": "proceedings/msn/2021/0668/0", "title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859789", "title": "MFVP: Mobile-Friendly Viewport Prediction for Live 360-Degree Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859789/1G9EA5cTE88", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859963", "title": "CoLive: An Edge-Assisted Online Learning Framework for Viewport Prediction in 360&#x00B0; Live Streaming", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859963/1G9EwWVBvuo", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iisa/2022/6390/0/09904420", "title": "Subtitle-based Viewport Prediction for 360-degree Virtual Tourism Video", "doi": 
null, "abstractUrl": "/proceedings-article/iisa/2022/09904420/1H5KpY37ODe", "parentPublication": { "id": "proceedings/iisa/2022/6390/0", "title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2022/6457/0/645700a609", "title": "InstaVarjoLive: An Edge-Assisted 360 Degree Video Live Streaming for Virtual Reality Testbed", "doi": null, "abstractUrl": "/proceedings-article/msn/2022/645700a609/1LUtLUWKy4g", "parentPublication": { "id": "proceedings/msn/2022/6457/0", "title": "2022 18th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a173", "title": "CaV3: Cache-assisted Viewport Adaptive Volumetric Video Streaming", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a173/1MNgV4ZxSQ8", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2019/5604/0/560400a183", "title": "Exploring CNN-Based Viewport Prediction for Live Virtual Reality Streaming", "doi": null, "abstractUrl": "/proceedings-article/aivr/2019/560400a183/1grOj3Q3Hwc", "parentPublication": { "id": "proceedings/aivr/2019/5604/0", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2021/9101/0/09333964", "title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming", "doi": null, "abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc", "parentPublication": { "id": "proceedings/icoin/2021/9101/0", "title": "2021 
International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09393620", "title": "LiveObj: Object Semantics-based Viewport Prediction for Live Mobile Virtual Reality Streaming", "doi": null, "abstractUrl": "/journal/tg/2021/05/09393620/1srMExDcXcY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx8wTfL", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNAnuTqb", "doi": "10.1109/ICPR.2008.4761540", "title": "Real-time image-based rendering system for virtual city based on image compression technique and eigen texture method", "normalizedTitle": "Real-time image-based rendering system for virtual city based on image compression technique and eigen texture method", "abstract": "Computer modeling of a large-scale scene such as a city becomes an important topic for computer vision and computer graphics research areas etc. Image-based rendering (IBR) is an effective method for expressing realistic scene, and can construct any arbitrary viewpoint by using the captured real images. However, the large size of the image database in IBR causes serious problems in actual applications, leading to the use of compression techniques. We propose a compression technique based on eigen space combined with a block matching technique to get better result. We also propose a technique to restore the compressed data on Graphic Processing Unit (GPU), allowing us to perform high-speed rendering without raising the load on the CPU.", "abstracts": [ { "abstractType": "Regular", "content": "Computer modeling of a large-scale scene such as a city becomes an important topic for computer vision and computer graphics research areas etc. Image-based rendering (IBR) is an effective method for expressing realistic scene, and can construct any arbitrary viewpoint by using the captured real images. However, the large size of the image database in IBR causes serious problems in actual applications, leading to the use of compression techniques. We propose a compression technique based on eigen space combined with a block matching technique to get better result. 
We also propose a technique to restore the compressed data on Graphic Processing Unit (GPU), allowing us to perform high-speed rendering without raising the load on the CPU.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Computer modeling of a large-scale scene such as a city becomes an important topic for computer vision and computer graphics research areas etc. Image-based rendering (IBR) is an effective method for expressing realistic scene, and can construct any arbitrary viewpoint by using the captured real images. However, the large size of the image database in IBR causes serious problems in actual applications, leading to the use of compression techniques. We propose a compression technique based on eigen space combined with a block matching technique to get better result. We also propose a technique to restore the compressed data on Graphic Processing Unit (GPU), allowing us to perform high-speed rendering without raising the load on the CPU.", "fno": "04761540", "keywords": [ "Computer Graphics", "Data Compression", "Eigenvalues And Eigenfunctions", "Image Coding", "Image Matching", "Image Texture", "Visual Databases", "Real Time Image Based Rendering System", "Virtual City", "Image Compression Technique", "Eigen Texture Method", "Computer Graphics", "Realistic Scene", "Captured Real Images", "Image Database", "Block Matching Technique", "Graphic Processing Unit", "Real Time Systems", "Rendering Computer Graphics", "Cities And Towns", "Image Coding", "Computer Vision", "Layout", "Large Scale Systems", "Computer Graphics", "Image Databases", "Application Software" ], "authors": [ { "affiliation": "Saitama University, Japan", "fullName": "Ryo Sato", "givenName": "Ryo", "surname": "Sato", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo, Japan", "fullName": "Shintaro Ono", "givenName": "Shintaro", "surname": "Ono", "__typename": "ArticleAuthorType" }, { "affiliation": "Saitama University, Japan", "fullName": 
"Hiroshi Kawasaki", "givenName": "Hiroshi", "surname": "Kawasaki", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo, Japan", "fullName": "Katsushi Ikeuchi", "givenName": "Katsushi", "surname": "Ikeuchi", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "1051-4651", "isbn": "978-1-4244-2174-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04761539", "articleId": "12OmNB8kHNc", "__typename": "AdjacentArticleType" }, "next": { "fno": "04761541", "articleId": "12OmNznkKag", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/visual/2001/7201/0/00964520", "title": "Texture hardware assisted rendering of time-varying volume data", "doi": null, "abstractUrl": "/proceedings-article/visual/2001/00964520/12OmNAR1aX5", "parentPublication": { "id": "proceedings/visual/2001/7201/0", "title": "Proceedings VIS 2001. 
Visualization 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwar/1999/0359/0/03590115", "title": "Photometric Image-Based Rendering for Virtual Lighting Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/iwar/1999/03590115/12OmNqNG3km", "parentPublication": { "id": "proceedings/iwar/1999/0359/0", "title": "Augmented Reality, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2011/279/0/05749524", "title": "Rendering Lossless Compression of Depth Image", "doi": null, "abstractUrl": "/proceedings-article/dcc/2011/05749524/12OmNqzcvHl", "parentPublication": { "id": "proceedings/dcc/2011/279/0", "title": "2011 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a351", "title": "Multi-View Inpainting for Image-Based Scene Editing and Rendering", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a351/12OmNxEjXRB", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a311", "title": "Noniterative Adaptive Sampling for Image-Based Rendering", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a311/12OmNyRg4k0", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532798", "title": "View-dependent rendering of multiresolution texture-atlases", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532798/12OmNyYDDCK", "parentPublication": { "id": 
"proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a469", "title": "A Bayesian Approach for Selective Image-Based Rendering Using Superpixels", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a469/12OmNzR8Cwl", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011857", "title": "Architecture design and analysis of image-based rendering engine", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011857/12OmNzVoBzN", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwcse/2009/3881/1/3881a288", "title": "Image-Based Rendering Using Unstructured Image Set", "doi": null, "abstractUrl": "/proceedings-article/iwcse/2009/3881a288/12OmNzwHvmn", "parentPublication": { "id": "proceedings/iwcse/2009/3881/1", "title": "Computer Science and Engineering, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h803", "title": "Learning Robust Image-Based Rendering on Sparse Scene Geometry via Depth Completion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h803/1H1kEugxLxe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0cK", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNx5GTYC", "doi": "10.1109/ISMAR.2016.18", "title": "Instant Mixed Reality Lighting from Casual Scanning", "normalizedTitle": "Instant Mixed Reality Lighting from Casual Scanning", "abstract": "We present a method for recovering both incident lighting and surface materials from casually scanned geometry. By casual, we mean a rapid and potentially noisy scanning procedure of unmodified and uninstrumented scenes with a commodity RGB-D sensor. In other words, unlike reconstruction procedures which require careful preparations in a laboratory environment, our method works with input that can be obtained by consumer users. To ensure a robust procedure, we segment the reconstructed geometry into surfaces with homogeneous material properties and compute the radiance transfer on these segments. With this input, we solve the inverse rendering problem of factorization into lighting and material properties using an iterative optimization in spherical harmonics form. This allows us to account for self-shadowing and recover specular properties. The resulting data can be used to generate a wide range of mixed reality applications, including the rendering of synthetic objects with matching lighting into a given scene, but also re-rendering the scene (or a part of it) with new lighting. We show the robustness of our approach with real and synthetic examples under a variety of lighting conditions and compare them with ground truth data.", "abstracts": [ { "abstractType": "Regular", "content": "We present a method for recovering both incident lighting and surface materials from casually scanned geometry. 
By casual, we mean a rapid and potentially noisy scanning procedure of unmodified and uninstrumented scenes with a commodity RGB-D sensor. In other words, unlike reconstruction procedures which require careful preparations in a laboratory environment, our method works with input that can be obtained by consumer users. To ensure a robust procedure, we segment the reconstructed geometry into surfaces with homogeneous material properties and compute the radiance transfer on these segments. With this input, we solve the inverse rendering problem of factorization into lighting and material properties using an iterative optimization in spherical harmonics form. This allows us to account for self-shadowing and recover specular properties. The resulting data can be used to generate a wide range of mixed reality applications, including the rendering of synthetic objects with matching lighting into a given scene, but also re-rendering the scene (or a part of it) with new lighting. We show the robustness of our approach with real and synthetic examples under a variety of lighting conditions and compare them with ground truth data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a method for recovering both incident lighting and surface materials from casually scanned geometry. By casual, we mean a rapid and potentially noisy scanning procedure of unmodified and uninstrumented scenes with a commodity RGB-D sensor. In other words, unlike reconstruction procedures which require careful preparations in a laboratory environment, our method works with input that can be obtained by consumer users. To ensure a robust procedure, we segment the reconstructed geometry into surfaces with homogeneous material properties and compute the radiance transfer on these segments. With this input, we solve the inverse rendering problem of factorization into lighting and material properties using an iterative optimization in spherical harmonics form. 
This allows us to account for self-shadowing and recover specular properties. The resulting data can be used to generate a wide range of mixed reality applications, including the rendering of synthetic objects with matching lighting into a given scene, but also re-rendering the scene (or a part of it) with new lighting. We show the robustness of our approach with real and synthetic examples under a variety of lighting conditions and compare them with ground truth data.", "fno": "3641a027", "keywords": [ "Lighting", "Cameras", "Image Color Analysis", "Face", "Geometry", "Image Reconstruction", "Estimation", "Virtual Realities I 4 8 Image Processing And Computer Vision Photometric Registration 3 D Reconstruction", "H 5 1 Information Interfaces And Presentation Artificial", "Augmented" ], "authors": [ { "affiliation": null, "fullName": "Thomas Richter-Trummer", "givenName": "Thomas", "surname": "Richter-Trummer", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Denis Kalkofen", "givenName": "Denis", "surname": "Kalkofen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jinwoo Park", "givenName": "Jinwoo", "surname": "Park", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dieter Schmalstieg", "givenName": "Dieter", "surname": "Schmalstieg", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "27-36", "year": "2016", "issn": null, "isbn": "978-1-5090-3641-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3641a018", "articleId": "12OmNCwUmxA", "__typename": "AdjacentArticleType" }, "next": { "fno": "3641a037", "articleId": "12OmNrJAdMm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/iccv/2017/1032/0/1032d133", "title": "Intrinsic3D: High-Quality 3D Reconstruction by Joint Appearance and Geometry Optimization with Spatially-Varying Lighting", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d133/12OmNC4eSyL", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163128", "title": "Realistic inverse lighting from a single 2D image of a face, taken under unknown and complex lighting", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163128/12OmNwbLVkr", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/04/08606207", "title": "Hierarchical Bayesian Inverse Lighting of Portraits with a Virtual Light Stage", "doi": null, "abstractUrl": "/journal/tp/2020/04/08606207/17D45W2WyxK", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09783067", "title": "A Self-occlusion Aware Lighting Model for Real-time Dynamic Reconstruction", "doi": null, "abstractUrl": "/journal/tg/5555/01/09783067/1DIwTDMm7Mk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8541", "title": "PhotoScene: Photorealistic Material and Lighting Transfer for Indoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8541/1H1nmFHmaoE", "parentPublication": { "id": 
"proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i597", "title": "Neural Inverse Rendering of an Indoor Scene From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i597/1hVlOrVOpck", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c472", "title": "Inverse Rendering for Complex Indoor Scenes: Shape, Spatially-Varying Lighting and SVBRDF From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c472/1m3o03C864M", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800b186", "title": "Shape from Tracing: Towards Reconstructing 3D Object Geometry and SVBRDF Material from Images via Differentiable Path Tracing", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800b186/1qyxkY66O08", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800b147", "title": "Precomputed Radiance Transfer for Reflectance and Lighting Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800b147/1qyxlpSwLhC", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, 
{ "id": "proceedings/cvpr/2021/4509/0/450900k0586", "title": "Lighting, Reflectance and Geometry Estimation from 360&#x00B0; Panoramic Stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0586/1yeIplXJ9wQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx2QUDD", "title": "2015 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNzR8Cwl", "doi": "10.1109/3DV.2015.59", "title": "A Bayesian Approach for Selective Image-Based Rendering Using Superpixels", "normalizedTitle": "A Bayesian Approach for Selective Image-Based Rendering Using Superpixels", "abstract": "Image-Based Rendering (IBR) algorithms generate high quality photo-realistic imagery without the burden of detailed modeling and expensive realistic rendering. Recent methods have different strengths and weaknesses, depending on 3D reconstruction quality and scene content. Each algorithm operates with a set of hypotheses about the scene and the novel views, resulting in different quality/speed trade-offs in different image regions. We present a principled approach to select the algorithm with the best quality/speed trade-off in each region. To do this, we propose a Bayesian approach, modeling the rendering quality, the rendering process and the validity of the assumptions of each algorithm. We then choose the algorithm to use with Maximum a Posteriori estimation. We demonstrate the utility of our approach on recent IBR algorithms which use over segmentation and are based on planar reprojection and shape-preserving warps respectively. Our algorithm selects the best rendering algorithm for each super pixel in a preprocessing step, at runtime our selective IBR uses this choice to achieve significant speedup at equivalent or better quality compared to previous algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "Image-Based Rendering (IBR) algorithms generate high quality photo-realistic imagery without the burden of detailed modeling and expensive realistic rendering. 
Recent methods have different strengths and weaknesses, depending on 3D reconstruction quality and scene content. Each algorithm operates with a set of hypotheses about the scene and the novel views, resulting in different quality/speed trade-offs in different image regions. We present a principled approach to select the algorithm with the best quality/speed trade-off in each region. To do this, we propose a Bayesian approach, modeling the rendering quality, the rendering process and the validity of the assumptions of each algorithm. We then choose the algorithm to use with Maximum a Posteriori estimation. We demonstrate the utility of our approach on recent IBR algorithms which use over segmentation and are based on planar reprojection and shape-preserving warps respectively. Our algorithm selects the best rendering algorithm for each super pixel in a preprocessing step, at runtime our selective IBR uses this choice to achieve significant speedup at equivalent or better quality compared to previous algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Image-Based Rendering (IBR) algorithms generate high quality photo-realistic imagery without the burden of detailed modeling and expensive realistic rendering. Recent methods have different strengths and weaknesses, depending on 3D reconstruction quality and scene content. Each algorithm operates with a set of hypotheses about the scene and the novel views, resulting in different quality/speed trade-offs in different image regions. We present a principled approach to select the algorithm with the best quality/speed trade-off in each region. To do this, we propose a Bayesian approach, modeling the rendering quality, the rendering process and the validity of the assumptions of each algorithm. We then choose the algorithm to use with Maximum a Posteriori estimation. 
We demonstrate the utility of our approach on recent IBR algorithms which use over segmentation and are based on planar reprojection and shape-preserving warps respectively. Our algorithm selects the best rendering algorithm for each super pixel in a preprocessing step, at runtime our selective IBR uses this choice to achieve significant speedup at equivalent or better quality compared to previous algorithms.", "fno": "8332a469", "keywords": [ "Rendering Computer Graphics", "Bayes Methods", "Three Dimensional Displays", "Real Time Systems", "Image Reconstruction", "Estimation", "Cameras", "Shape Preserving Warp", "Image Based Rendering And Modeling", "Superpixels" ], "authors": [ { "affiliation": null, "fullName": "Rodrigo Ortiz Cayon", "givenName": "Rodrigo Ortiz", "surname": "Cayon", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Abdelaziz Djelouah", "givenName": "Abdelaziz", "surname": "Djelouah", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "George Drettakis", "givenName": "George", "surname": "Drettakis", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-10-01T00:00:00", "pubType": "proceedings", "pages": "469-477", "year": "2015", "issn": null, "isbn": "978-1-4673-8332-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "8332a460", "articleId": "12OmNzwpUq0", "__typename": "AdjacentArticleType" }, "next": { "fno": "8332a478", "articleId": "12OmNwFid1n", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iwar/1999/0359/0/03590115", "title": "Photometric Image-Based Rendering for Virtual Lighting Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/iwar/1999/03590115/12OmNqNG3km", "parentPublication": { "id": 
"proceedings/iwar/1999/0359/0", "title": "Augmented Reality, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2003/7965/1/7965233", "title": "A system for active image-based rendering", "doi": null, "abstractUrl": "/proceedings-article/icme/2003/7965233/12OmNs0TKUB", "parentPublication": { "id": "proceedings/icme/2003/7965/1", "title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118d906", "title": "Bayesian View Synthesis and Image-Based Rendering Principles", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118d906/12OmNvrdHZZ", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a351", "title": "Multi-View Inpainting for Image-Based Scene Editing and Rendering", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a351/12OmNxEjXRB", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a286", "title": "Automatic 3D Car Model Alignment for Mixed Image-Based Rendering", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a286/12OmNy314dK", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a311", "title": "Noniterative Adaptive Sampling for Image-Based Rendering", "doi": null, 
"abstractUrl": "/proceedings-article/icvrv/2011/4602a311/12OmNyRg4k0", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011857", "title": "Architecture design and analysis of image-based rendering engine", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011857/12OmNzVoBzN", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwcse/2009/3881/1/3881a288", "title": "Image-Based Rendering Using Unstructured Image Set", "doi": null, "abstractUrl": "/proceedings-article/iwcse/2009/3881a288/12OmNzwHvmn", "parentPublication": { "id": "proceedings/iwcse/2009/3881/1", "title": "Computer Science and Engineering, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h803", "title": "Learning Robust Image-Based Rendering on Sparse Scene Geometry via Depth Completion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h803/1H1kEugxLxe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a356", "title": "Where to Render: Studying Renderability for IBR of Large-Scale Scenes", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a356/1MNgROnDNsY", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1kEugxLxe", "doi": "10.1109/CVPR52688.2022.00766", "title": "Learning Robust Image-Based Rendering on Sparse Scene Geometry via Depth Completion", "normalizedTitle": "Learning Robust Image-Based Rendering on Sparse Scene Geometry via Depth Completion", "abstract": "Recent image-based rendering (IBR) methods usually adopt plenty of views to reconstruct dense scene geometry. However, the number of available views is limited in prac-tice. When only few views are provided, the performance of these methods drops off significantly, as the scene geometry becomes sparse as well. Therefore, in this paper, we propose Sparse-IBRNet (SIBRNet) to perform robust IBR on sparse scene geometry by depth completion. The SIBR-Net has two stages, geometry recovery (GR) stage and light blending (LB) stage. Specifically, GR stage takes sparse depth map and RGB as input to predict dense depth map by exploiting the correlation between two modals. As in-accuracy of the complete depth map may cause projection biases in the warping process, LB stage first uses a bias-corrected module (BCM) to rectify deviations, and then ag-gregates modified features from different views to render a novel view. Extensive experimental results demonstrate that our method performs best on sparse scene geometry than re-cent IBR methods, and it can generate better or comparable results as well when the geometric information is dense.<sup>1</sup>", "abstracts": [ { "abstractType": "Regular", "content": "Recent image-based rendering (IBR) methods usually adopt plenty of views to reconstruct dense scene geometry. However, the number of available views is limited in prac-tice. 
When only few views are provided, the performance of these methods drops off significantly, as the scene geometry becomes sparse as well. Therefore, in this paper, we propose Sparse-IBRNet (SIBRNet) to perform robust IBR on sparse scene geometry by depth completion. The SIBR-Net has two stages, geometry recovery (GR) stage and light blending (LB) stage. Specifically, GR stage takes sparse depth map and RGB as input to predict dense depth map by exploiting the correlation between two modals. As in-accuracy of the complete depth map may cause projection biases in the warping process, LB stage first uses a bias-corrected module (BCM) to rectify deviations, and then ag-gregates modified features from different views to render a novel view. Extensive experimental results demonstrate that our method performs best on sparse scene geometry than re-cent IBR methods, and it can generate better or comparable results as well when the geometric information is dense.<sup>1</sup>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent image-based rendering (IBR) methods usually adopt plenty of views to reconstruct dense scene geometry. However, the number of available views is limited in prac-tice. When only few views are provided, the performance of these methods drops off significantly, as the scene geometry becomes sparse as well. Therefore, in this paper, we propose Sparse-IBRNet (SIBRNet) to perform robust IBR on sparse scene geometry by depth completion. The SIBR-Net has two stages, geometry recovery (GR) stage and light blending (LB) stage. Specifically, GR stage takes sparse depth map and RGB as input to predict dense depth map by exploiting the correlation between two modals. As in-accuracy of the complete depth map may cause projection biases in the warping process, LB stage first uses a bias-corrected module (BCM) to rectify deviations, and then ag-gregates modified features from different views to render a novel view. 
Extensive experimental results demonstrate that our method performs best on sparse scene geometry than re-cent IBR methods, and it can generate better or comparable results as well when the geometric information is dense.1", "fno": "694600h803", "keywords": [ "Feature Extraction", "Image Colour Analysis", "Image Reconstruction", "Image Representation", "Image Sensors", "Learning Artificial Intelligence", "Rendering Computer Graphics", "Stereo Image Processing", "GR Stage", "Sparse Depth Map", "Dense Depth Map", "Complete Depth Map", "Sparse Scene Geometry", "Robust Image Based", "Depth Completion", "Recent Image Based", "Dense Scene Geometry", "Available Views", "Robust IBR", "Geometry Recovery Stage", "Light Blending Stage", "Geometry", "Computer Vision", "Correlation", "Rendering Computer Graphics", "Pattern Recognition", "Image Reconstruction" ], "authors": [ { "affiliation": "Fudan University,School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing,Shanghai,China", "fullName": "Yuqi Sun", "givenName": "Yuqi", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Fudan University,School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing,Shanghai,China", "fullName": "Shili Zhou", "givenName": "Shili", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Fudan University,School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing,Shanghai,China", "fullName": "Ri Cheng", "givenName": "Ri", "surname": "Cheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Fudan University,School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of 
Intelligent Visual Computing,Shanghai,China", "fullName": "Weimin Tan", "givenName": "Weimin", "surname": "Tan", "__typename": "ArticleAuthorType" }, { "affiliation": "Fudan University,School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing,Shanghai,China", "fullName": "Bo Yan", "givenName": "Bo", "surname": "Yan", "__typename": "ArticleAuthorType" }, { "affiliation": "Fudan University,School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing,Shanghai,China", "fullName": "Lang Fu", "givenName": "Lang", "surname": "Fu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "7803-7813", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1kEr6Y8UM", "name": "pcvpr202269460-09878554s1-mm_694600h803.zip", "size": "1.91 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878554s1-mm_694600h803.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600h793", "articleId": "1H1mbI4myYw", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600h814", "articleId": "1H1jnVQyouc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118d906", "title": "Bayesian View Synthesis and Image-Based Rendering Principles", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118d906/12OmNvrdHZZ", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2016/1552/0/07574699", "title": "Computer generated hologram from Multiview-plus-Depth data considering specular reflections", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574699/12OmNwDSdFk", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a351", "title": "Multi-View Inpainting for Image-Based Scene Editing and Rendering", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a351/12OmNxEjXRB", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a469", "title": "A Bayesian Approach for Selective Image-Based Rendering Using Superpixels", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a469/12OmNzR8Cwl", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i259", "title": "Light Field Neural Rendering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i259/1H1j93SQIG4", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i597", "title": "Neural Inverse Rendering of an Indoor Scene From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i597/1hVlOrVOpck", "parentPublication": { 
"id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09385924", "title": "Instant Panoramic Texture Mapping with Semantic Object Matching for Large-Scale Urban Scene Reproduction", "doi": null, "abstractUrl": "/journal/tg/2021/05/09385924/1seinXN8TwQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2021/1952/0/09466274", "title": "View-dependent Scene Appearance Synthesis using Inverse Rendering from Light Fields", "doi": null, "abstractUrl": "/proceedings-article/iccp/2021/09466274/1uSSV7tRhSw", "parentPublication": { "id": "proceedings/iccp/2021/1952/0", "title": "2021 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900e688", "title": "IBRNet: Learning Multi-View Image-Based Rendering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900e688/1yeIC9n861q", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900g222", "title": "NeuralHumanFVV: Real-Time Neural Volumetric Human Performance Rendering using RGB Cameras", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900g222/1yeIMelAx8s", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzayN6r", "title": "2014 International Conference on Information Science and Applications (ICISA)", "acronym": "icisa", "groupId": "1800053", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNrJAdSv", "doi": "10.1109/ICISA.2014.6847398", "title": "Eye Detection for near Infrared Based Gaze Tracking System", "normalizedTitle": "Eye Detection for near Infrared Based Gaze Tracking System", "abstract": "This paper presents eye detection method for gaze tracking system using the features of eye and corneal reflection. The proposed method sequentially discards the regions that are expected not to be an eye by classifiers based on the features such as pupil intensity and appearance, corneal reflection intensity, and so on. The classifiers are designed with some empirical parameters, thus the proposed method does not need training process.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents eye detection method for gaze tracking system using the features of eye and corneal reflection. The proposed method sequentially discards the regions that are expected not to be an eye by classifiers based on the features such as pupil intensity and appearance, corneal reflection intensity, and so on. The classifiers are designed with some empirical parameters, thus the proposed method does not need training process.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents eye detection method for gaze tracking system using the features of eye and corneal reflection. The proposed method sequentially discards the regions that are expected not to be an eye by classifiers based on the features such as pupil intensity and appearance, corneal reflection intensity, and so on. 
The classifiers are designed with some empirical parameters, thus the proposed method does not need training process.", "fno": "06847398", "keywords": [ "Reflection", "Cameras", "Iris", "Probes", "Face", "Feature Extraction", "Broadcasting" ], "authors": [ { "affiliation": null, "fullName": "Hyun-Cheol Kim", "givenName": "Hyun-Cheol", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jihun Cha", "givenName": "Jihun", "surname": "Cha", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Won Don Lee", "givenName": "Won Don", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "icisa", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-05-01T00:00:00", "pubType": "proceedings", "pages": "1-3", "year": "2014", "issn": null, "isbn": "978-1-4799-4443-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06847397", "articleId": "12OmNxcMSdd", "__typename": "AdjacentArticleType" }, "next": { "fno": "06847399", "articleId": "12OmNqFrGuf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/gcis/2009/3571/2/3571b133", "title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection", "doi": null, "abstractUrl": "/proceedings-article/gcis/2009/3571b133/12OmNA0vo1q", "parentPublication": { "id": "proceedings/gcis/2009/3571/2", "title": "2009 WRI Global Congress on Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761343", "title": "3D gaze estimation with a single camera without IR illumination", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761343/12OmNvvLi4R", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on 
Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2015/7905/0/7905a904", "title": "Implementation of an Eye Gaze Tracking System for the Disabled People", "doi": null, "abstractUrl": "/proceedings-article/aina/2015/7905a904/12OmNwEJ115", "parentPublication": { "id": "proceedings/aina/2015/7905/0", "title": "2015 IEEE 29th International Conference on Advanced Information Networking and Applications (AINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2014/7981/0/7981a458", "title": "Eye Detection for Gaze Tracker with Near Infrared Illuminator", "doi": null, "abstractUrl": "/proceedings-article/cse/2014/7981a458/12OmNx3q6Yv", "parentPublication": { "id": "proceedings/cse/2014/7981/0", "title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2003/1900/2/190020451", "title": "Eye Gaze Tracking Using an Active Stereo Head", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2003/190020451/12OmNxRWI2Y", "parentPublication": { "id": "proceedings/cvpr/2003/1900/2", "title": "2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. 
Proceedings.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2002/1858/0/18580191", "title": "Appearance-based Eye Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2002/18580191/12OmNzYwcdp", "parentPublication": { "id": "proceedings/wacv/2002/1858/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2014/7978/0/7978a574", "title": "Gender and Age Categorization Using Gaze Analysis", "doi": null, "abstractUrl": "/proceedings-article/sitis/2014/7978a574/12OmNzZ5ogk", "parentPublication": { "id": "proceedings/sitis/2014/7978/0", "title": "2014 Tenth International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500d937", "title": "Event-Based Kilohertz Eye Tracking using Coded Differential Lighting", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500d937/1B13uiL4IUM", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412066", "title": "Detection and Correspondence Matching of Corneal Reflections for Eye Tracking Using Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412066/1tmjH1aA4dG", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRq", "title": "2012 Third International Conference on Computing, Communication and Networking Technologies (ICCCNT 2012)", "acronym": "icccnt", "groupId": "1802177", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNvwC5wg", "doi": "10.1109/ICCCNT.2012.6396075", "title": "A novel iris recognition algorithm", "normalizedTitle": "A novel iris recognition algorithm", "abstract": "Goal of the proposed iris recognition is to recognize human identity through the textural characteristics of one's iris muscular patterns. Even though eye color is dependent on heredity, in contrast to this, iris is independent and uncorrelated even for twins. Out of various biometrics such as finger and hand geometry, face, ear and voice recognition, iris recognition has been acknowledged as one of the most accurate biometric modalities because of its high recognition rate. In this proposed iris recognition method, pupil localization is done by using negative function and four neighbours method so that irrespective of pupil's contour, either circle or ellipse, the pupil's boundary is detected accurately. For iris outer boundary detection, contrast enhancement, special wedges and thresholding techniques are used to isolate the specific iris regions without eyelid and eyelash occlusions. Now the resultant iris portion alone is transformed into polar coordinate system for normalization process. Histogram equalization technique is used for enhancing the normalized iris image. For feature extraction and matching process, cumulative sum-based change analysis and hamming distance are employed. 
When compared with the existing algorithms, this proposed algorithm is robust, accurate and also has low computational time and complexity.", "abstracts": [ { "abstractType": "Regular", "content": "Goal of the proposed iris recognition is to recognize human identity through the textural characteristics of one's iris muscular patterns. Even though eye color is dependent on heredity, in contrast to this, iris is independent and uncorrelated even for twins. Out of various biometrics such as finger and hand geometry, face, ear and voice recognition, iris recognition has been acknowledged as one of the most accurate biometric modalities because of its high recognition rate. In this proposed iris recognition method, pupil localization is done by using negative function and four neighbours method so that irrespective of pupil's contour, either circle or ellipse, the pupil's boundary is detected accurately. For iris outer boundary detection, contrast enhancement, special wedges and thresholding techniques are used to isolate the specific iris regions without eyelid and eyelash occlusions. Now the resultant iris portion alone is transformed into polar coordinate system for normalization process. Histogram equalization technique is used for enhancing the normalized iris image. For feature extraction and matching process, cumulative sum-based change analysis and hamming distance are employed. When compared with the existing algorithms, this proposed algorithm is robust, accurate and also has low computational time and complexity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Goal of the proposed iris recognition is to recognize human identity through the textural characteristics of one's iris muscular patterns. Even though eye color is dependent on heredity, in contrast to this, iris is independent and uncorrelated even for twins. 
Out of various biometrics such as finger and hand geometry, face, ear and voice recognition, iris recognition has been acknowledged as one of the most accurate biometric modalities because of its high recognition rate. In this proposed iris recognition method, pupil localization is done by using negative function and four neighbours method so that irrespective of pupil's contour, either circle or ellipse, the pupil's boundary is detected accurately. For iris outer boundary detection, contrast enhancement, special wedges and thresholding techniques are used to isolate the specific iris regions without eyelid and eyelash occlusions. Now the resultant iris portion alone is transformed into polar coordinate system for normalization process. Histogram equalization technique is used for enhancing the normalized iris image. For feature extraction and matching process, cumulative sum-based change analysis and hamming distance are employed. When compared with the existing algorithms, this proposed algorithm is robust, accurate and also has low computational time and complexity.", "fno": "06396075", "keywords": [ "Computational Complexity", "Edge Detection", "Equalisers", "Eye", "Feature Extraction", "Image Colour Analysis", "Image Enhancement", "Image Matching", "Image Segmentation", "Image Texture", "Iris Recognition", "Iris Recognition Algorithm", "Human Identity Recognition", "Textural Characteristics", "Iris Muscular Pattern", "Eye Color", "Biometric Modalities", "Recognition Rate", "Pupil Localization", "Negative Function", "Four Neighbours Method", "Pupil Contour", "Pupil Boundary Detection", "Iris Outer Boundary Detection", "Contrast Enhancement", "Thresholding Technique", "Iris Region", "Polar Coordinate System", "Normalization Process", "Histogram Equalization", "Normalized Iris Image Enhancement", "Feature Extraction", "Matching Process", "Cumulative Sum Based Change Analysis", "Hamming Distance", "Computational Time", "Computational Complexity", "Image 
Segmentation", "Feature Extraction", "Physiology", "Iris Recognition", "Image Recognition", "Indexes", "Pupil Localization", "Iris Localization", "Thresholding", "Iris Segmentation", "Cumulative Sum", "Hamming Distance" ], "authors": [ { "affiliation": "Mahendra Institute of Technology, Namakkal District, India", "fullName": "R. P. Ramkumar", "givenName": "R. P.", "surname": "Ramkumar", "__typename": "ArticleAuthorType" }, { "affiliation": "Nandha College of Technology, Erode District. Tamilnadu, India", "fullName": "S. Arumugam", "givenName": "S.", "surname": "Arumugam", "__typename": "ArticleAuthorType" } ], "idPrefix": "icccnt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2012", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06396074", "articleId": "12OmNBlofNn", "__typename": "AdjacentArticleType" }, "next": { "fno": "06395906", "articleId": "12OmNz61dcI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/gocict/2015/2314/0/2314a051", "title": "An Elliptic Curve Algorithm for Iris Pattern Recognition", "doi": null, "abstractUrl": "/proceedings-article/gocict/2015/2314a051/12OmNAlvHRs", "parentPublication": { "id": "proceedings/gocict/2015/2314/0", "title": "2015 Annual Global Online Conference on Information and Computer Technology (GOCICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2013/0174/0/06607828", "title": "A novel approach for code match in iris recognition", "doi": null, "abstractUrl": "/proceedings-article/icis/2013/06607828/12OmNB9bvjK", "parentPublication": { "id": "proceedings/icis/2013/0174/0", "title": "2013 IEEE/ACIS 12th International Conference on Computer and Information 
Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccct/2012/3149/0/06394726", "title": "A Novel Approach to Minimize the Impact of Non Ideal Samples in Iris Recognition System", "doi": null, "abstractUrl": "/proceedings-article/iccct/2012/06394726/12OmNBSSVcv", "parentPublication": { "id": "proceedings/iccct/2012/3149/0", "title": "2012 3rd International Conference on Computer and Communication Technology (ICCCT 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2011/4584/0/4584b127", "title": "Fast Iris Boundary Location Based on Window Mapping Method", "doi": null, "abstractUrl": "/proceedings-article/cis/2011/4584b127/12OmNBl6ENe", "parentPublication": { "id": "proceedings/cis/2011/4584/0", "title": "2011 Seventh International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsct/2008/3498/2/3498b045", "title": "Detection of Non-iris Region in the Iris Recognition", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498b045/12OmNrK9q0Q", "parentPublication": { "id": "proceedings/iscsct/2008/3498/1", "title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fskd/2008/3305/3/3305c063", "title": "A New Iris Region Segmentation Method", "doi": null, "abstractUrl": "/proceedings-article/fskd/2008/3305c063/12OmNwNwzLa", "parentPublication": { "id": "proceedings/fskd/2008/3305/3", "title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isda/2008/3382/2/3382b340", "title": "A Novel Template Protection Algorithm for Iris Recognition", "doi": null, 
"abstractUrl": "/proceedings-article/isda/2008/3382b340/12OmNxETaok", "parentPublication": { "id": "proceedings/isda/2008/3382/2", "title": "2008 Eighth International Conference on Intelligent Systems Design and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etcs/2009/3557/3/3557e316", "title": "A New Localization Method for Iris Recognition Based on Angular Integral Projection Function", "doi": null, "abstractUrl": "/proceedings-article/etcs/2009/3557e316/12OmNz6iOpM", "parentPublication": { "id": "proceedings/etcs/2009/3557/3", "title": "Education Technology and Computer Science, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncis/2011/4355/1/4355a381", "title": "A Iris Recognition Based on Geometric Information", "doi": null, "abstractUrl": "/proceedings-article/ncis/2011/4355a381/12OmNzRZq40", "parentPublication": { "id": "proceedings/ncis/2011/4355/1", "title": "Network Computing and Information Security, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aicis/2018/9188/0/918800a089", "title": "Iris Recognition Using Principal Component Analysis", "doi": null, "abstractUrl": "/proceedings-article/aicis/2018/918800a089/17PYElGZUBp", "parentPublication": { "id": "proceedings/aicis/2018/9188/0", "title": "2018 1st Annual International Conference on Information and Sciences (AiCIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNs0kyrx", "title": "2014 Fourth International Conference of Emerging Applications of Information Technology (EAIT)", "acronym": "eait", "groupId": "1800320", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNxwWoxY", "doi": "10.1109/EAIT.2014.28", "title": "A Fast and Robust Method for Iris Localization", "normalizedTitle": "A Fast and Robust Method for Iris Localization", "abstract": "A novel method for localization of iris images is proposed in this paper. Especially the iris localization plays a vital role in an iris recognition system such that it increases both speed and performances of the recognition system to a great extent. The process of iris localization is used to detect an annular region between the pupil (black portion) and the sclera (white portion) of an eye. Here, for iris localization, we use a modified version of Circular Hough transform for both pupil and iris segmentation. Extensive experimental results have been performed to show that proposed algorithm has satisfying performance as compared to existing methods for iris localization. The performance of the proposed system is tested with IITD and MMU1 iris databases.", "abstracts": [ { "abstractType": "Regular", "content": "A novel method for localization of iris images is proposed in this paper. Especially the iris localization plays a vital role in an iris recognition system such that it increases both speed and performances of the recognition system to a great extent. The process of iris localization is used to detect an annular region between the pupil (black portion) and the sclera (white portion) of an eye. Here, for iris localization, we use a modified version of Circular Hough transform for both pupil and iris segmentation. Extensive experimental results have been performed to show that proposed algorithm has satisfying performance as compared to existing methods for iris localization. 
The performance of the proposed system is tested with IITD and MMU1 iris databases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A novel method for localization of iris images is proposed in this paper. Especially the iris localization plays a vital role in an iris recognition system such that it increases both speed and performances of the recognition system to a great extent. The process of iris localization is used to detect an annular region between the pupil (black portion) and the sclera (white portion) of an eye. Here, for iris localization, we use a modified version of Circular Hough transform for both pupil and iris segmentation. Extensive experimental results have been performed to show that proposed algorithm has satisfying performance as compared to existing methods for iris localization. The performance of the proposed system is tested with IITD and MMU1 iris databases.", "fno": "4272a262", "keywords": [ "Iris Recognition", "Iris", "Databases", "Transforms", "Image Edge Detection", "Robustness", "Image Segmentation", "Outer Boundary", "Biometric", "Iris Localization", "Circular Hough Transform", "Inner Boundary" ], "authors": [ { "affiliation": null, "fullName": "Saiyed Umer", "givenName": "Saiyed", "surname": "Umer", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bibhas Chandra Dhara", "givenName": "Bibhas Chandra", "surname": "Dhara", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bhabatosh Chanda", "givenName": "Bhabatosh", "surname": "Chanda", "__typename": "ArticleAuthorType" } ], "idPrefix": "eait", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-12-01T00:00:00", "pubType": "proceedings", "pages": "262-267", "year": "2014", "issn": null, "isbn": "978-1-4799-4272-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4272a256", "articleId": 
"12OmNBC8ABD", "__typename": "AdjacentArticleType" }, "next": { "fno": "4272a268", "articleId": "12OmNB836H2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icris/2016/4155/0/4155a353", "title": "Research on Iris Localization Algorithms", "doi": null, "abstractUrl": "/proceedings-article/icris/2016/4155a353/12OmNBQkx6M", "parentPublication": { "id": "proceedings/icris/2016/4155/0", "title": "2016 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761429", "title": "An incremental method for accurate iris segmentation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761429/12OmNrMZpnr", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460894", "title": "Accurate iris localization using contour segments", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460894/12OmNviHKer", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363197", "title": "A Fast and Accurate Iris Localization Technique for Healthcare Security System", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363197/12OmNy68ECt", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing 
(CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c852", "title": "A Robust Iris Localization Method Using an Active Contour Model and Hough Transform", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c852/12OmNyFCvUZ", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eait/2011/4329/0/4329a089", "title": "A Fast Method for Iris Localization", "doi": null, "abstractUrl": "/proceedings-article/eait/2011/4329a089/12OmNyrIayn", "parentPublication": { "id": "proceedings/eait/2011/4329/0", "title": "Emerging Applications of Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/socpar/2009/3879/0/3879a393", "title": "Fast Algorithm for Iris Localization Using Daugman Circular Integro Differential Operator", "doi": null, "abstractUrl": "/proceedings-article/socpar/2009/3879a393/12OmNyxFKc9", "parentPublication": { "id": "proceedings/socpar/2009/3879/0", "title": "Soft Computing and Pattern Recognition, International Conference of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etcs/2009/3557/3/3557e316", "title": "A New Localization Method for Iris Recognition Based on Angular Integral Projection Function", "doi": null, "abstractUrl": "/proceedings-article/etcs/2009/3557e316/12OmNz6iOpM", "parentPublication": { "id": "proceedings/etcs/2009/3557/3", "title": "Education Technology and Computer Science, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b752", "title": "IDEM: Iris DEtection on Mobile Devices", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2014/5209b752/12OmNzYeANQ", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a991", "title": "Segmentation-free Direct Iris Localization Networks", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a991/1L8qic3zUcw", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzQhP7U", "title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)", "acronym": "icsip", "groupId": "1800261", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNy50g6r", "doi": "10.1109/ICSIP.2014.56", "title": "A Novel Approach to Circular Edge Detection for Iris Image Segmentation", "normalizedTitle": "A Novel Approach to Circular Edge Detection for Iris Image Segmentation", "abstract": "This paper details about segmentation of iris region for iris recognition as a biometrical personal identification and verification. Human iris is unique and differs from one individual to another. Just as finger prints, biomedical proves human irises are distinct. Also, iris can be easily accessed from any visual capturing device. The two dimensional structure of iris further assists the technology. This paper describes the extraction of iris region from an image of the human eye. The proposed algorithm defines a new method to segment Iris from the image. It's a new technique for circular edge detection particularly for Iris recognition. An image undergoes various operations like black and white conversion, edge detection and filtering. The fact that the intensity of iris lies between the intensities of pupil and rest of the eye is the key here to extract iris. A simple vertical and horizontal scan is done over the image to get the tangents of the circles. A mathematical analysis is done on the images to get the radius and the center of the circle and hence the inner and outer circles of the iris are drawn or Hough transform can be done using the obtained values for more accuracy. We are constructed the circles after obtaining the values.", "abstracts": [ { "abstractType": "Regular", "content": "This paper details about segmentation of iris region for iris recognition as a biometrical personal identification and verification. 
Human iris is unique and differs from one individual to another. Just as finger prints, biomedical proves human irises are distinct. Also, iris can be easily accessed from any visual capturing device. The two dimensional structure of iris further assists the technology. This paper describes the extraction of iris region from an image of the human eye. The proposed algorithm defines a new method to segment Iris from the image. It's a new technique for circular edge detection particularly for Iris recognition. An image undergoes various operations like black and white conversion, edge detection and filtering. The fact that the intensity of iris lies between the intensities of pupil and rest of the eye is the key here to extract iris. A simple vertical and horizontal scan is done over the image to get the tangents of the circles. A mathematical analysis is done on the images to get the radius and the center of the circle and hence the inner and outer circles of the iris are drawn or Hough transform can be done using the obtained values for more accuracy. We are constructed the circles after obtaining the values.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper details about segmentation of iris region for iris recognition as a biometrical personal identification and verification. Human iris is unique and differs from one individual to another. Just as finger prints, biomedical proves human irises are distinct. Also, iris can be easily accessed from any visual capturing device. The two dimensional structure of iris further assists the technology. This paper describes the extraction of iris region from an image of the human eye. The proposed algorithm defines a new method to segment Iris from the image. It's a new technique for circular edge detection particularly for Iris recognition. An image undergoes various operations like black and white conversion, edge detection and filtering. 
The fact that the intensity of iris lies between the intensities of pupil and rest of the eye is the key here to extract iris. A simple vertical and horizontal scan is done over the image to get the tangents of the circles. A mathematical analysis is done on the images to get the radius and the center of the circle and hence the inner and outer circles of the iris are drawn or Hough transform can be done using the obtained values for more accuracy. We are constructed the circles after obtaining the values.", "fno": "5100a316", "keywords": [ "Iris Recognition", "Image Edge Detection", "Eyelashes", "Transforms", "Iris", "Image Segmentation", "Eyelids", "Iris Recognition", "Iris Segmentation", "Biometric", "Circular Edge Detection", "Threshold" ], "authors": [ { "affiliation": null, "fullName": "H.R. Shashidhara", "givenName": "H.R.", "surname": "Shashidhara", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "A.R. Aswath", "givenName": "A.R.", "surname": "Aswath", "__typename": "ArticleAuthorType" } ], "idPrefix": "icsip", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-01-01T00:00:00", "pubType": "proceedings", "pages": "316-320", "year": "2014", "issn": null, "isbn": "978-0-7695-5100-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5100a310", "articleId": "12OmNzCF4Za", "__typename": "AdjacentArticleType" }, "next": { "fno": "5100a323", "articleId": "12OmNvjgWvL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icis/2013/0174/0/06607828", "title": "A novel approach for code match in iris recognition", "doi": null, "abstractUrl": "/proceedings-article/icis/2013/06607828/12OmNB9bvjK", "parentPublication": { "id": "proceedings/icis/2013/0174/0", "title": "2013 IEEE/ACIS 12th International Conference on Computer and Information 
Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2016/4155/0/4155a353", "title": "Research on Iris Localization Algorithms", "doi": null, "abstractUrl": "/proceedings-article/icris/2016/4155a353/12OmNBQkx6M", "parentPublication": { "id": "proceedings/icris/2016/4155/0", "title": "2016 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci/2006/0475/2/04216524", "title": "Improving the Performance of Iris Recogniton System Using Eyelids and Eyelashes Detection and Iris Image Enhancement", "doi": null, "abstractUrl": "/proceedings-article/icci/2006/04216524/12OmNCfAPJI", "parentPublication": { "id": "proceedings/icci/2006/0475/2", "title": "Cognitive Informatics, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2016/2303/0/2303a183", "title": "An Approach of Noisy Color Iris Segmentation Based on Hybrid Image Processing Techniques", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a183/12OmNro0I7t", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccnt/2012/9999/0/06396011", "title": "Comparative analysis of iris segmentation methods along with quality enhancement", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2012/06396011/12OmNvSbBAn", "parentPublication": { "id": "proceedings/icccnt/2012/9999/0", "title": "2012 Third International Conference on Computing, Communication and Networking Technologies (ICCCNT 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761844", "title": "Iris localization based on multi-resolution 
analysis", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761844/12OmNxisR0u", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209c489", "title": "Highly Usable and Accurate Iris Segmentation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c489/12OmNxveNL7", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363197", "title": "A Fast and Accurate Iris Localization Technique for Healthcare Security System", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363197/12OmNy68ECt", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a351", "title": "A Performance Comparison between Circular and Spline-Based Methods for Iris Segmentation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a351/12OmNzRZpTB", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a991", "title": "Segmentation-free Direct Iris Localization Networks", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2023/934600a991/1L8qic3zUcw", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgXc", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNzUPpfB", "doi": "10.1109/ICCVW.2011.6130505", "title": "Illumination-free gaze estimation method for first-person vision wearable device", "normalizedTitle": "Illumination-free gaze estimation method for first-person vision wearable device", "abstract": "Gaze estimation is a key technology to understand a person's interests and intents, and it is becoming more popular in daily situations such as driving scenarios. Wearable gaze estimation devices are use for long periods of time, therefore non-active sources are not desirable from a safety point of view. Gaze estimation that does not rely on active source, is performed by locating iris position. To estimate the iris position accurately, most studies use ellipse fitting in which the ellipse is defined by 5 parameters(position (x,y), rotation angle, semi-major axis and semi-minor axis). We claim that, for iris position estimation, 5 parameters are redundant because they might be influenced by non-iris edges. Therefore, we propose to use 2 parameters(position) introducing a 3D eye model(the transformation between eye and camera coordinate and eyeball/iris size). Given 3D eye model, projected ellipse that represents iris shape can be specified only by position under weak-perspective approximation. We quantitatively evaluate our method on both iris position and gaze estimation. 
Our results show that our method outperforms other state-of-the-art's iris estimation and is competitive to commercial product that use infrared ray with respect to both accuracy and robustness.", "abstracts": [ { "abstractType": "Regular", "content": "Gaze estimation is a key technology to understand a person's interests and intents, and it is becoming more popular in daily situations such as driving scenarios. Wearable gaze estimation devices are use for long periods of time, therefore non-active sources are not desirable from a safety point of view. Gaze estimation that does not rely on active source, is performed by locating iris position. To estimate the iris position accurately, most studies use ellipse fitting in which the ellipse is defined by 5 parameters(position (x,y), rotation angle, semi-major axis and semi-minor axis). We claim that, for iris position estimation, 5 parameters are redundant because they might be influenced by non-iris edges. Therefore, we propose to use 2 parameters(position) introducing a 3D eye model(the transformation between eye and camera coordinate and eyeball/iris size). Given 3D eye model, projected ellipse that represents iris shape can be specified only by position under weak-perspective approximation. We quantitatively evaluate our method on both iris position and gaze estimation. Our results show that our method outperforms other state-of-the-art's iris estimation and is competitive to commercial product that use infrared ray with respect to both accuracy and robustness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Gaze estimation is a key technology to understand a person's interests and intents, and it is becoming more popular in daily situations such as driving scenarios. Wearable gaze estimation devices are use for long periods of time, therefore non-active sources are not desirable from a safety point of view. 
Gaze estimation that does not rely on active source, is performed by locating iris position. To estimate the iris position accurately, most studies use ellipse fitting in which the ellipse is defined by 5 parameters(position (x,y), rotation angle, semi-major axis and semi-minor axis). We claim that, for iris position estimation, 5 parameters are redundant because they might be influenced by non-iris edges. Therefore, we propose to use 2 parameters(position) introducing a 3D eye model(the transformation between eye and camera coordinate and eyeball/iris size). Given 3D eye model, projected ellipse that represents iris shape can be specified only by position under weak-perspective approximation. We quantitatively evaluate our method on both iris position and gaze estimation. Our results show that our method outperforms other state-of-the-art's iris estimation and is competitive to commercial product that use infrared ray with respect to both accuracy and robustness.", "fno": "06130505", "keywords": [ "Approximation Theory", "Iris Recognition", "Pose Estimation", "Illumination Free Gaze Estimation Method", "First Person Vision Wearable Device", "Wearable Gaze Estimation Device", "Nonactive Source", "Iris Position Estimation", "Noniris Edge", "3 D Eye Model", "Iris Shape Representation", "Infrared Ray", "Iris", "Estimation", "Image Edge Detection", "Shape", "Three Dimensional Displays", "Robustness", "Solid Modeling" ], "authors": [ { "affiliation": "Carnegie Mellon University, USA", "fullName": "Akihiro Tsukada", "givenName": "Akihiro", "surname": "Tsukada", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University, USA", "fullName": "Motoki Shino", "givenName": "Motoki", "surname": "Shino", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University, USA", "fullName": "Michael Devyver", "givenName": "Michael", "surname": "Devyver", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University, USA", 
"fullName": "Takeo Kanade", "givenName": "Takeo", "surname": "Kanade", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-11-01T00:00:00", "pubType": "proceedings", "pages": "2084-2091", "year": "2011", "issn": null, "isbn": "978-1-4673-0063-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06130504", "articleId": "12OmNB9bver", "__typename": "AdjacentArticleType" }, "next": { "fno": "06130506", "articleId": "12OmNzCF4Vd", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2013/5545/0/06553735", "title": "Combining first-person and third-person gaze for attention recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2013/06553735/12OmNvEhg0x", "parentPublication": { "id": "proceedings/fg/2013/5545/0", "title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460894", "title": "Accurate iris localization using contour segments", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460894/12OmNviHKer", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890322", "title": "Realtime gaze estimation with online calibration", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890322/12OmNvjyxUU", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761343", "title": "3D gaze estimation with a single camera without IR illumination", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761343/12OmNvvLi4R", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2013/5053/0/06475042", "title": "Unwrapping the eye for visible-spectrum gaze tracking on wearable devices", "doi": null, "abstractUrl": "/proceedings-article/wacv/2013/06475042/12OmNwE9OwM", "parentPublication": { "id": "proceedings/wacv/2013/5053/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2003/1950/1/195010136", "title": "Eye Gaze Estimation from a Single Image of One Eye", "doi": null, "abstractUrl": "/proceedings-article/iccv/2003/195010136/12OmNyv7mcM", "parentPublication": { "id": "proceedings/iccv/2003/1950/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2016/1437/0/1437a792", "title": "Person-Independent 3D Gaze Estimation Using Face Frontalization", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437a792/12OmNzYwbWh", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2018/6857/0/685700a291", "title": "Discriminative Robust Gaze Estimation Using Kernel-DMCCA Fusion", "doi": null, "abstractUrl": "/proceedings-article/ism/2018/685700a291/17D45XvMccq", "parentPublication": { "id": "proceedings/ism/2018/6857/0", "title": "2018 
IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a655", "title": "A Multi-Modal Gaze Tracking Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a655/1ehBL8sk06I", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0", "title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09389650", "title": "EllSeg: An Ellipse Segmentation Framework for Robust Gaze Tracking", "doi": null, "abstractUrl": "/journal/tg/2021/05/09389650/1smZUThnFi8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzVXNJa", "title": "Convergence Information Technology, International Conference on", "acronym": "iccit", "groupId": "1001590", "volume": "2", "displayVolume": "2", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNzX6cfT", "doi": "10.1109/ICCIT.2008.176", "title": "A Novel Compound Approach for Iris Segmentation", "normalizedTitle": "A Novel Compound Approach for Iris Segmentation", "abstract": "With a growing emphasis on human identification, iris recognition as a biometric identification has recently received increasing attention. The first step in iris recognition is segmentation. In this paper a new segmentation approach is offered which does not use any information of color or texture as the segmentation cues. Instead, we use random sample consensus method to fit an ellipse or a circle to the edge information of iris boundary. The presented approach is robust against the iris texture variations and other trouble makers like eyelid and specularity effect in pupil area. The extracted curves in this method are more conformable with iris boundaries than the curves obtained by other conventional methods.", "abstracts": [ { "abstractType": "Regular", "content": "With a growing emphasis on human identification, iris recognition as a biometric identification has recently received increasing attention. The first step in iris recognition is segmentation. In this paper a new segmentation approach is offered which does not use any information of color or texture as the segmentation cues. Instead, we use random sample consensus method to fit an ellipse or a circle to the edge information of iris boundary. The presented approach is robust against the iris texture variations and other trouble makers like eyelid and specularity effect in pupil area. 
The extracted curves in this method are more conformable with iris boundaries than the curves obtained by other conventional methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With a growing emphasis on human identification, iris recognition as a biometric identification has recently received increasing attention. The first step in iris recognition is segmentation. In this paper a new segmentation approach is offered which does not use any information of color or texture as the segmentation cues. Instead, we use random sample consensus method to fit an ellipse or a circle to the edge information of iris boundary. The presented approach is robust against the iris texture variations and other trouble makers like eyelid and specularity effect in pupil area. The extracted curves in this method are more conformable with iris boundaries than the curves obtained by other conventional methods.", "fno": "3407c657", "keywords": [ "Biometric Identification", "Non Linear Data Fitting", "Iris Segmentation", "Ransac" ], "authors": [ { "affiliation": null, "fullName": "Hamed Ranjzad", "givenName": "Hamed", "surname": "Ranjzad", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hossein Ebrahimnezhad", "givenName": "Hossein", "surname": "Ebrahimnezhad", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Afshin Ebrahimi", "givenName": "Afshin", "surname": "Ebrahimi", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-11-01T00:00:00", "pubType": "proceedings", "pages": "657-661", "year": "2008", "issn": null, "isbn": "978-0-7695-3407-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3407c651", "articleId": "12OmNz4SOyo", "__typename": "AdjacentArticleType" }, "next": { "fno": "3407c662", "articleId": "12OmNrGb2ek", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209a527", "title": "A Ground Truth for Iris Segmentation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a527/12OmNvDI44g", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgi/2010/4181/0/4181a099", "title": "Personal Authentication Using Human Iris Recognition Based on Embedded Zerotree Wavelet Coding", "doi": null, "abstractUrl": "/proceedings-article/iccgi/2010/4181a099/12OmNvzJGbK", "parentPublication": { "id": "proceedings/iccgi/2010/4181/0", "title": "Computing in the Global Information Technology, International Multi-Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/est/2010/4175/0/4175a007", "title": "Are Two Eyes Better than One? 
An Experimental Investigation on Dual Iris Recognition", "doi": null, "abstractUrl": "/proceedings-article/est/2010/4175a007/12OmNwFicZn", "parentPublication": { "id": "proceedings/est/2010/4175/0", "title": "2010 International Conference on Emerging Security Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/synasc/2009/3964/0/3964a384", "title": "Exploring New Directions in Iris Recognition", "doi": null, "abstractUrl": "/proceedings-article/synasc/2009/3964a384/12OmNx6xHoH", "parentPublication": { "id": "proceedings/synasc/2009/3964/0", "title": "2009 11th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsip/2014/5100/0/5100a316", "title": "A Novel Approach to Circular Edge Detection for Iris Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/icsip/2014/5100a316/12OmNy50g6r", "parentPublication": { "id": "proceedings/icsip/2014/5100/0", "title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2008/3497/1/3497a467", "title": "A Novel Registration Approach Based on Curve Fitting Technique for Iris Images", "doi": null, "abstractUrl": "/proceedings-article/iita/2008/3497a467/12OmNyqiaSq", "parentPublication": { "id": "proceedings/iita/2008/3497/3", "title": "2008 Second International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c857", "title": "IS_IS: Iris Segmentation for Identification Systems", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c857/12OmNzkuKyM", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, 
International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/09/ttp2009091670", "title": "Toward Accurate and Fast Iris Segmentation for Iris Biometrics", "doi": null, "abstractUrl": "/journal/tp/2009/09/ttp2009091670/13rRUwInvzz", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545840", "title": "SegDenseNet: Iris Segmentation for Pre-and-Post Cataract Surgery", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545840/17D45VN31hS", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2022/1015/0/101500a511", "title": "A Novel Iris Segmentation Approach Using Spindle Ternary Tree", "doi": null, "abstractUrl": "/proceedings-article/itme/2022/101500a511/1M4rl9ZF6UM", "parentPublication": { "id": "proceedings/itme/2022/1015/0", "title": "2022 12th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1IHotVZum6Q", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "9956007", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1IHqDe5kTyU", "doi": "10.1109/ICPR56361.2022.9956573", "title": "Alcohol Consumption Detection from Periocular NIR Images Using Capsule Network", "normalizedTitle": "Alcohol Consumption Detection from Periocular NIR Images Using Capsule Network", "abstract": "This research proposes a method to detect alcohol consumption from a Near-Infra-Red (NIR) periocular eye images. The study focuses on determining the effect of external factors such as alcohol on the Central Nervous System (CNS). The goal is to analyse how this impacts on iris and pupil movements and if it is possible to capture these changes with a standard iris NIR camera. This paper proposes a novel Fused Capsule Network (F-CapsNet) to classify iris NIR images taken under alcohol consumption subjects. The results show the F-CapsNet algorithm can detect alcohol consumption in iris NIR images with an accuracy of 92.3% using half of parameters than the standard Capsule Network algorithm. This work is a step forward for developing an automatic system to estimate \"Fitness for Duty\" and prevent accidents due to alcohol consumption.", "abstracts": [ { "abstractType": "Regular", "content": "This research proposes a method to detect alcohol consumption from a Near-Infra-Red (NIR) periocular eye images. The study focuses on determining the effect of external factors such as alcohol on the Central Nervous System (CNS). The goal is to analyse how this impacts on iris and pupil movements and if it is possible to capture these changes with a standard iris NIR camera. This paper proposes a novel Fused Capsule Network (F-CapsNet) to classify iris NIR images taken under alcohol consumption subjects. 
The results show the F-CapsNet algorithm can detect alcohol consumption in iris NIR images with an accuracy of 92.3% using half of parameters than the standard Capsule Network algorithm. This work is a step forward for developing an automatic system to estimate \"Fitness for Duty\" and prevent accidents due to alcohol consumption.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This research proposes a method to detect alcohol consumption from a Near-Infra-Red (NIR) periocular eye images. The study focuses on determining the effect of external factors such as alcohol on the Central Nervous System (CNS). The goal is to analyse how this impacts on iris and pupil movements and if it is possible to capture these changes with a standard iris NIR camera. This paper proposes a novel Fused Capsule Network (F-CapsNet) to classify iris NIR images taken under alcohol consumption subjects. The results show the F-CapsNet algorithm can detect alcohol consumption in iris NIR images with an accuracy of 92.3% using half of parameters than the standard Capsule Network algorithm. 
This work is a step forward for developing an automatic system to estimate \"Fitness for Duty\" and prevent accidents due to alcohol consumption.", "fno": "09956573", "keywords": [ "Behavioural Sciences Computing", "Biomedical Optical Imaging", "Cameras", "Eye", "Feature Extraction", "Image Classification", "Iris Recognition", "Learning Artificial Intelligence", "Neurophysiology", "Patient Monitoring", "Pattern Classification", "Regression Analysis", "Alcohol Consumption Detection", "Automatic System", "Central Nervous System", "Fitness For Duty Estimation", "Fused Capsule Network", "Near Infrared Periocular Eye Images", "Periocular NIR Images", "Pupil Movements", "Standard Capsule Network Algorithm", "Standard Iris NIR Camera", "Central Nervous System", "Cameras", "Classification Algorithms", "Pattern Recognition", "Pupils", "Standards", "Iris Recognition" ], "authors": [ { "affiliation": "Hochschule Darmstadt,da/sec-Biometrics and Internet Security Research Group,Germany", "fullName": "Juan Tapia", "givenName": "Juan", "surname": "Tapia", "__typename": "ArticleAuthorType" }, { "affiliation": "Hochschule Darmstadt,da/sec-Biometrics and Internet Security Research Group,Germany", "fullName": "Enrique Lopez Droguett", "givenName": "Enrique Lopez", "surname": "Droguett", "__typename": "ArticleAuthorType" }, { "affiliation": "Hochschule Darmstadt,da/sec-Biometrics and Internet Security Research Group,Germany", "fullName": "Christoph Busch", "givenName": "Christoph", "surname": "Busch", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "959-966", "year": "2022", "issn": null, "isbn": "978-1-6654-9062-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09956357", "articleId": "1IHoxY8V89W", "__typename": "AdjacentArticleType" }, 
"next": { "fno": "09956336", "articleId": "1IHpxNokOfm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2010/7029/0/05544621", "title": "Periocular region appearance cues for biometric identification", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05544621/12OmNAhxjD5", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a201", "title": "On the Fusion of Periocular and Iris Biometrics in Non-ideal Imagery", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a201/12OmNB0Fxi6", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvisp/2017/0612/0/0612a040", "title": "Real-World Non-NIR Illumination and Wavelength-Specific Acquisition Variants in Iris Recognition", "doi": null, "abstractUrl": "/proceedings-article/icvisp/2017/0612a040/12OmNs4S8CM", "parentPublication": { "id": "proceedings/icvisp/2017/0612/0", "title": "2017 International Conference on Vision, Image and Signal Processing (ICVISP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbeb/2012/4706/0/4706b566", "title": "NIR Determination of Three Critical Quality Attributes in Alcohol Precipitation Process of Lonicerae Japonicae with Uncertainty Analysis", "doi": null, "abstractUrl": "/proceedings-article/icbeb/2012/4706b566/12OmNvA1h9a", "parentPublication": { "id": "proceedings/icbeb/2012/4706/0", "title": "Biomedical Engineering and Biotechnology, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/wacv/2013/5053/0/06475025", "title": "Periocular biometric recognition using image sets", "doi": null, "abstractUrl": "/proceedings-article/wacv/2013/06475025/12OmNyTOspv", "parentPublication": { "id": "proceedings/wacv/2013/5053/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2015/6759/0/07301318", "title": "Evaluation of combined visible/NIR camera for iris authentication on smartphones", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301318/12OmNyr8Yze", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460901", "title": "Iris image classification based on color information", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460901/12OmNzX6ckU", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500a867", "title": "Iris Recognition: Comparing Visible-Light Lateral and Frontal Illumination to NIR Frontal Illumination", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a867/18j8HqxuBZ6", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2019/5227/0/522700a178", "title": "Simultaneous Iris and Periocular Region Detection Using Coarse Annotations", "doi": null, "abstractUrl": 
"/proceedings-article/sibgrapi/2019/522700a178/1fHlqIwKGu4", "parentPublication": { "id": "proceedings/sibgrapi/2019/5227/0", "title": "2019 32nd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900o4304", "title": "Capsule Network is Not More Robust than Convolutional Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900o4304/1yeI5tJT6WA", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1i5mkDyiIUg", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1i5msUQsh1u", "doi": "10.1109/ICCVW.2019.00456", "title": "EyeNet: Attention Based Convolutional Encoder-Decoder Network for Eye Region Segmentation", "normalizedTitle": "EyeNet: Attention Based Convolutional Encoder-Decoder Network for Eye Region Segmentation", "abstract": "With the immersive development in the field of augmented and virtual reality, accurate and speedy eye-tracking is required. Facebook Research has organized a challenge, named OpenEDS Semantic Segmentation challenge for per-pixel segmentation of the key eye regions: the sclera, the iris, the pupil, and everything else (background). There are two constraints set for the participants viz MIOU and the computational complexity of the model. More recently, researchers have achieved quite a good result using the convolutional neural networks (CNN) in segmenting eye-regions. However, the environmental challenges involved in this task such as low resolution, blur, unusual glint and, illumination, off-angles, off-axis, use of glasses and different color of iris region hinder the accuracy of segmentation. To address the challenges in eye segmentation, the present work proposes a robust and computationally efficient attention-based convolutional encoder-decoder network for segmenting all the eye regions. Our model, named EyeNet, includes modified residual units as the backbone, two types of attention blocks and multi-scale supervision for segmenting the aforesaid four eye regions. 
Our proposed model achieved a total score of 0.974(EDS Evaluation metric) on test data, which demonstrates superior results compared to the baseline methods.", "abstracts": [ { "abstractType": "Regular", "content": "With the immersive development in the field of augmented and virtual reality, accurate and speedy eye-tracking is required. Facebook Research has organized a challenge, named OpenEDS Semantic Segmentation challenge for per-pixel segmentation of the key eye regions: the sclera, the iris, the pupil, and everything else (background). There are two constraints set for the participants viz MIOU and the computational complexity of the model. More recently, researchers have achieved quite a good result using the convolutional neural networks (CNN) in segmenting eye-regions. However, the environmental challenges involved in this task such as low resolution, blur, unusual glint and, illumination, off-angles, off-axis, use of glasses and different color of iris region hinder the accuracy of segmentation. To address the challenges in eye segmentation, the present work proposes a robust and computationally efficient attention-based convolutional encoder-decoder network for segmenting all the eye regions. Our model, named EyeNet, includes modified residual units as the backbone, two types of attention blocks and multi-scale supervision for segmenting the aforesaid four eye regions. Our proposed model achieved a total score of 0.974(EDS Evaluation metric) on test data, which demonstrates superior results compared to the baseline methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the immersive development in the field of augmented and virtual reality, accurate and speedy eye-tracking is required. Facebook Research has organized a challenge, named OpenEDS Semantic Segmentation challenge for per-pixel segmentation of the key eye regions: the sclera, the iris, the pupil, and everything else (background). 
There are two constraints set for the participants viz MIOU and the computational complexity of the model. More recently, researchers have achieved quite a good result using the convolutional neural networks (CNN) in segmenting eye-regions. However, the environmental challenges involved in this task such as low resolution, blur, unusual glint and, illumination, off-angles, off-axis, use of glasses and different color of iris region hinder the accuracy of segmentation. To address the challenges in eye segmentation, the present work proposes a robust and computationally efficient attention-based convolutional encoder-decoder network for segmenting all the eye regions. Our model, named EyeNet, includes modified residual units as the backbone, two types of attention blocks and multi-scale supervision for segmenting the aforesaid four eye regions. Our proposed model achieved a total score of 0.974(EDS Evaluation metric) on test data, which demonstrates superior results compared to the baseline methods.", "fno": "502300d688", "keywords": [ "Convolutional Neural Nets", "Gaze Tracking", "Image Segmentation", "Iris Recognition", "Convolutional Neural Networks", "Per Pixel Segmentation", "Open EDS Semantic Segmentation Challenge", "Facebook Research", "Eye Tracking", "Eye Region Segmentation", "Attention Blocks", "Eye Net", "Attention Based Convolutional Encoder Decoder Network", "Iris Region", "Image Segmentation", "Decoding", "Iris Recognition", "Iris", "Training", "Computational Modeling", "Solid Modeling", "Eyenet", "Segmentation", "AR VR", "Attention Based Model" ], "authors": [ { "affiliation": "Couger Inc.", "fullName": "Priya Kansal", "givenName": "Priya", "surname": "Kansal", "__typename": "ArticleAuthorType" }, { "affiliation": "Couger Inc.", "fullName": "Sabarinathan Devanathan", "givenName": "Sabarinathan", "surname": "Devanathan", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": 
true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "3688-3693", "year": "2019", "issn": null, "isbn": "978-1-7281-5023-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "502300d683", "articleId": "1i5mOYR5gre", "__typename": "AdjacentArticleType" }, "next": { "fno": "502300d694", "articleId": "1i5muSosJkk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/synasc/2009/3964/0/3964a384", "title": "Exploring New Directions in Iris Recognition", "doi": null, "abstractUrl": "/proceedings-article/synasc/2009/3964a384/12OmNx6xHoH", "parentPublication": { "id": "proceedings/synasc/2009/3964/0", "title": "2009 11th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icect/2009/3559/0/3559a554", "title": "Iris Recognition System Using Statistical Features for Biometric Identification", "doi": null, "abstractUrl": "/proceedings-article/icect/2009/3559a554/12OmNyPQ4wG", "parentPublication": { "id": "proceedings/icect/2009/3559/0", "title": "2009 International Conference on Electronic Computer Technology. 
ICECT 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477673", "title": "Accurate eye center localization using Snakuscule", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477673/12OmNyQ7G3B", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/08/ttp2010081502", "title": "Iris Recognition: On the Segmentation of Degraded Images Acquired in the Visible Wavelength", "doi": null, "abstractUrl": "/journal/tp/2010/08/ttp2010081502/13rRUwcS1Ee", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ewdts/2019/1003/0/08884441", "title": "Algorithm for Extraction of the Iris Region in an Eye Image", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2019/08884441/1eEV15VyV20", "parentPublication": { "id": "proceedings/ewdts/2019/1003/0", "title": "2019 IEEE East-West Design & Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d665", "title": "Eye-MMS: Miniature Multi-Scale Segmentation Network of Key Eye-Regions in Embedded Applications", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d665/1i5mIWMkdzO", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d694", "title": "Eye Semantic Segmentation with A Lightweight Model", "doi": null, "abstractUrl": 
"/proceedings-article/iccvw/2019/502300d694/1i5muSosJkk", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2020/6034/0/603400a121", "title": "Ocular-Net: Lite-Residual Encoder Decoder Network for Accurate Ocular Regions Segmentation in Various Sensor Images", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2020/603400a121/1jdDvxdKXDy", "parentPublication": { "id": "proceedings/bigcomp/2020/6034/0", "title": "2020 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a011", "title": "Edge-Guided Near-Eye Image Analysis for Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a011/1yeCW4N7Y9a", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a367", "title": "TEyeD: Over 20 Million Real-World Eye Images with Pupil, Eyelid, and Iris 2D and 3D Segmentations, 2D and 3D Landmarks, 3D Eyeball, Gaze Vector, and Eye Movement Types", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a367/1yeD3XlUpBS", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBUAvUf", "title": "Artificial Intelligence for Applications, Conference on", "acronym": "caia", "groupId": "1000050", "volume": "0", "displayVolume": "0", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNApu5KL", "doi": "10.1109/CAIA.1995.378771", "title": "Constraint-based approach for automatic spatial layout planning", "normalizedTitle": "Constraint-based approach for automatic spatial layout planning", "abstract": "Spatial layout planning generates two-dimensional layouts consisting of configuration of rectangles, and our new method represents the rectangles as an inconsistent sets of linear constraints. After that, we select the consistent sets from them by using constraint processing language. Because this type of problem is of the NP-complete class, we also introduce some redundant constraints such as qualitative relation and capacity. Redundant constraints generally slow a system, but with the partial (incremental) solver used for the's type of problem, redundant constraints may be useful in reducing the search space. Our approach is now being applied to the layout of personal homes. In this paper, we use this example to explain our approach and we describe our floor planning system TG-FP (Tokyo Gas Floor Planner) implemented by CLP (Constraint Logic Programming) Language. TG-FP can assist an architect by providing a large variety of feasible plans. In TG-FP, the only information that must be provided by the user is the definition of the room. We also point out the advantages of our approach with CLP language: advantages such as a declarative paradigm for easy description and efficiency for avoiding combinatorial explosion.", "abstracts": [ { "abstractType": "Regular", "content": "Spatial layout planning generates two-dimensional layouts consisting of configuration of rectangles, and our new method represents the rectangles as an inconsistent sets of linear constraints. 
After that, we select the consistent sets from them by using constraint processing language. Because this type of problem is of the NP-complete class, we also introduce some redundant constraints such as qualitative relation and capacity. Redundant constraints generally slow a system, but with the partial (incremental) solver used for the's type of problem, redundant constraints may be useful in reducing the search space. Our approach is now being applied to the layout of personal homes. In this paper, we use this example to explain our approach and we describe our floor planning system TG-FP (Tokyo Gas Floor Planner) implemented by CLP (Constraint Logic Programming) Language. TG-FP can assist an architect by providing a large variety of feasible plans. In TG-FP, the only information that must be provided by the user is the definition of the room. We also point out the advantages of our approach with CLP language: advantages such as a declarative paradigm for easy description and efficiency for avoiding combinatorial explosion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Spatial layout planning generates two-dimensional layouts consisting of configuration of rectangles, and our new method represents the rectangles as an inconsistent sets of linear constraints. After that, we select the consistent sets from them by using constraint processing language. Because this type of problem is of the NP-complete class, we also introduce some redundant constraints such as qualitative relation and capacity. Redundant constraints generally slow a system, but with the partial (incremental) solver used for the's type of problem, redundant constraints may be useful in reducing the search space. Our approach is now being applied to the layout of personal homes. In this paper, we use this example to explain our approach and we describe our floor planning system TG-FP (Tokyo Gas Floor Planner) implemented by CLP (Constraint Logic Programming) Language. 
TG-FP can assist an architect by providing a large variety of feasible plans. In TG-FP, the only information that must be provided by the user is the definition of the room. We also point out the advantages of our approach with CLP language: advantages such as a declarative paradigm for easy description and efficiency for avoiding combinatorial explosion.", "fno": "70700038", "keywords": [ "Constraint Handling Spatial Reasoning Problem Solving Planning Artificial Intelligence Architecture Automatic Spatial Layout Planning Two Dimensional Layouts Rectangles Linear Constraints Constraint Processing Language Redundant Constraints CLP Constraint Logic Programming Language Declarative Paradigm Combinatorial Explosion" ], "authors": [ { "affiliation": ".", "fullName": "K. Honda", "givenName": "K.", "surname": "Honda", "__typename": "ArticleAuthorType" }, { "affiliation": ".", "fullName": "F Mizoguchi", "givenName": "F", "surname": "Mizoguchi", "__typename": "ArticleAuthorType" } ], "idPrefix": "caia", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-02-01T00:00:00", "pubType": "proceedings", "pages": "38", "year": "1995", "issn": "1043-0989", "isbn": "0-8186-7070-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "70700032", "articleId": "12OmNyO8tSc", "__typename": "AdjacentArticleType" }, "next": { "fno": "70700046", "articleId": "12OmNBPc8vn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzcxZfq", "title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)", "acronym": "vlhcc", "groupId": "1001007", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNy4r3Sw", "doi": "10.1109/VLHCC.2011.6070379", "title": "Automatic diagram layout support for the Marama meta-toolset", "normalizedTitle": "Automatic diagram layout support for the Marama meta-toolset", "abstract": "Automatic layout can be a crucial support feature for complex diagramming tools. Adding suitable layout algorithms to diagramming tools is a complex task and meta-tools should incorporate these for reuse. We present MaramaALM, a generalised set of automatic layout mechanisms. This has been incorporated in the Eclipse-based Marama meta-toolset to support automatic layout in Marama diagrams. It provides an easy-to-use mechanism for tool developers to add such layouts to their generated tools. We describe our motivation for MaramaALM, our approach to its implementation and an example case study of using these tool extensions.", "abstracts": [ { "abstractType": "Regular", "content": "Automatic layout can be a crucial support feature for complex diagramming tools. Adding suitable layout algorithms to diagramming tools is a complex task and meta-tools should incorporate these for reuse. We present MaramaALM, a generalised set of automatic layout mechanisms. This has been incorporated in the Eclipse-based Marama meta-toolset to support automatic layout in Marama diagrams. It provides an easy-to-use mechanism for tool developers to add such layouts to their generated tools. We describe our motivation for MaramaALM, our approach to its implementation and an example case study of using these tool extensions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Automatic layout can be a crucial support feature for complex diagramming tools. 
Adding suitable layout algorithms to diagramming tools is a complex task and meta-tools should incorporate these for reuse. We present MaramaALM, a generalised set of automatic layout mechanisms. This has been incorporated in the Eclipse-based Marama meta-toolset to support automatic layout in Marama diagrams. It provides an easy-to-use mechanism for tool developers to add such layouts to their generated tools. We describe our motivation for MaramaALM, our approach to its implementation and an example case study of using these tool extensions.", "fno": "06070379", "keywords": [ "Layout", "Visualization", "Shape", "Generators", "Measurement", "Connectors", "Algorithm Design And Analysis", "Automatic Layout", "Meta Tools", "User Interface Design" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Univ. of Auckland, Auckland, New Zealand", "fullName": "Pei Shan Yap", "givenName": null, "surname": "Pei Shan Yap", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Univ. of Auckland, Auckland, New Zealand", "fullName": "J. Hosking", "givenName": "J.", "surname": "Hosking", "__typename": "ArticleAuthorType" }, { "affiliation": "Centre for Comput. & Eng. Software Syst., Swinburne Univ. of Technol., Melbourne, VIC, Australia", "fullName": "J. 
Grundy", "givenName": "J.", "surname": "Grundy", "__typename": "ArticleAuthorType" } ], "idPrefix": "vlhcc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-09-01T00:00:00", "pubType": "proceedings", "pages": "61-64", "year": "2011", "issn": "1943-6092", "isbn": "978-1-4577-1246-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06070378", "articleId": "12OmNCbU2Vb", "__typename": "AdjacentArticleType" }, "next": { "fno": "06070380", "articleId": "12OmNAsk4Cq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/case/1993/3480/0/00634835", "title": "A heuristics approach to automatic data flow diagram layout", "doi": null, "abstractUrl": "/proceedings-article/case/1993/00634835/12OmNC0y5Gt", "parentPublication": { "id": "proceedings/case/1993/3480/0", "title": "Proceedings of 6th International Workshop on Computer-Aided Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl/1993/3970/0/00269619", "title": "Constraint-driven diagram layout", "doi": null, "abstractUrl": "/proceedings-article/vl/1993/00269619/12OmNwvVrCN", "parentPublication": { "id": "proceedings/vl/1993/3970/0", "title": "Proceedings 1993 IEEE Symposium on Visual Languages", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl/1999/0216/0/02160012", "title": "Constraint-Based Diagram Beautification", "doi": null, "abstractUrl": "/proceedings-article/vl/1999/02160012/12OmNxVV5ZU", "parentPublication": { "id": "proceedings/vl/1999/0216/0", "title": "Visual Languages, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wvl/1992/3090/0/00275779", "title": "Layout-by-example: a fuzzy visual language for 
specifying stereotypes of diagram layout", "doi": null, "abstractUrl": "/proceedings-article/wvl/1992/00275779/12OmNxzuMKd", "parentPublication": { "id": "proceedings/wvl/1992/3090/0", "title": "Proceedings IEEE Workshop on Visual Languages", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl/1998/8712/0/87120056", "title": "Competitive Learning of Network Diagram Layout", "doi": null, "abstractUrl": "/proceedings-article/vl/1998/87120056/12OmNyRPgyj", "parentPublication": { "id": "proceedings/vl/1998/8712/0", "title": "Visual Languages, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edac/1992/2645/0/00205989", "title": "An automatic layout generator for analog circuits", "doi": null, "abstractUrl": "/proceedings-article/edac/1992/00205989/12OmNyS6RzD", "parentPublication": { "id": "proceedings/edac/1992/2645/0", "title": "Proceedings The European Conference on Design Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2004/2131/0/01342744", "title": "Architecture for generating Web-based, thin-client diagramming tools", "doi": null, "abstractUrl": "/proceedings-article/ase/2004/01342744/12OmNyen1ty", "parentPublication": { "id": "proceedings/ase/2004/2131/0", "title": "Proceedings. 
19th International Conference on Automated Software Engineering, 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seke/1992/2830/0/00227922", "title": "Automatic layout of diagrams for software specification", "doi": null, "abstractUrl": "/proceedings-article/seke/1992/00227922/12OmNz2TCuu", "parentPublication": { "id": "proceedings/seke/1992/2830/0", "title": "Proceedings Fourth International Conference on Software Engineering and Knowledge Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000h986", "title": "Inferring Semantic Layout for Hierarchical Text-to-Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h986/17D45VTRoCU", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a109", "title": "Automatic Furniture Layout Based on Functional Area Division", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a109/1fHklquet0s", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiq2", "title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "acronym": "vlhcc", "groupId": "1001007", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WUj90b", "doi": "10.1109/VLHCC.2018.8506571", "title": "Automatic Layout and Label Management for Compact UML Sequence Diagrams", "normalizedTitle": "Automatic Layout and Label Management for Compact UML Sequence Diagrams", "abstract": "Sequence diagrams belong to the most commonly used UML diagrams. There is research on desirable aesthetics, but to our knowledge no layout algorithms have been published. This might be due to the rigid specification of sequence diagrams that seems to make laying them out quite easy. However, as we argue here, naive algorithms do not always produce desirable solutions. We present methods to produce compact layouts which we have implemented in a layout algorithm and evaluate them with 50 real-world sequence diagrams.", "abstracts": [ { "abstractType": "Regular", "content": "Sequence diagrams belong to the most commonly used UML diagrams. There is research on desirable aesthetics, but to our knowledge no layout algorithms have been published. This might be due to the rigid specification of sequence diagrams that seems to make laying them out quite easy. However, as we argue here, naive algorithms do not always produce desirable solutions. We present methods to produce compact layouts which we have implemented in a layout algorithm and evaluate them with 50 real-world sequence diagrams.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Sequence diagrams belong to the most commonly used UML diagrams. There is research on desirable aesthetics, but to our knowledge no layout algorithms have been published. This might be due to the rigid specification of sequence diagrams that seems to make laying them out quite easy. 
However, as we argue here, naive algorithms do not always produce desirable solutions. We present methods to produce compact layouts which we have implemented in a layout algorithm and evaluate them with 50 real-world sequence diagrams.", "fno": "08506571", "keywords": [ "Layout", "Compaction", "Visualization", "Unified Modeling Language", "Electronic Mail", "Optimized Production Technology" ], "authors": [ { "affiliation": "Department of Computer Science, Christian-Albrechts-Universität, zu Kiel Kiel, Germany", "fullName": "Christoph Daniel Schulze", "givenName": "Christoph Daniel", "surname": "Schulze", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science, Christian-Albrechts-Universität, zu Kiel Kiel, Germany", "fullName": "Gregor Hoops", "givenName": "Gregor", "surname": "Hoops", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science, Christian-Albrechts-Universität, zu Kiel Kiel, Germany", "fullName": "Reinhard von Hanxleden", "givenName": "Reinhard", "surname": "von Hanxleden", "__typename": "ArticleAuthorType" } ], "idPrefix": "vlhcc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "187-191", "year": "2018", "issn": null, "isbn": "978-1-5386-4235-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08506508", "articleId": "17D45X2fUFl", "__typename": "AdjacentArticleType" }, "next": { "fno": "08506512", "articleId": "17D45W1Oa1W", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sbes/2014/4223/0/4223a141", "title": "Variability Identification and Representation in Software Product Line UML Sequence Diagrams: Proposal and Empirical Study", "doi": null, "abstractUrl": "/proceedings-article/sbes/2014/4223a141/12OmNBqMDoS", 
"parentPublication": { "id": "proceedings/sbes/2014/4223/0", "title": "2014 Brazilian Symposium on Software Engineering (SBES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2011/1246/0/06070390", "title": "On the impact of layout quality to understanding UML diagrams", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2011/06070390/12OmNqHItMD", "parentPublication": { "id": "proceedings/vlhcc/2011/1246/0", "title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2012/0852/0/06344480", "title": "On the impact of layout quality to understanding UML diagrams: Diagram type and expertise", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2012/06344480/12OmNrNh0KH", "parentPublication": { "id": "proceedings/vlhcc/2012/0852/0", "title": "2012 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2003/1988/0/19880272", "title": "Layout Metrics for Euler Diagrams", "doi": null, "abstractUrl": "/proceedings-article/iv/2003/19880272/12OmNvD8RBs", "parentPublication": { "id": "proceedings/iv/2003/1988/0", "title": "Proceedings on Seventh International Conference on Information Visualization, 2003. 
IV 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2014/5790/0/5790a088", "title": "Extracting the Combinatorial Test Parameters and Values from UML Sequence Diagrams", "doi": null, "abstractUrl": "/proceedings-article/icstw/2014/5790a088/12OmNwKoZcu", "parentPublication": { "id": "proceedings/icstw/2014/5790/0", "title": "2014 IEEE Seventh International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wpc/2005/2254/0/22540317", "title": "On Evaluating the Layout of UML Class Diagrams for Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/wpc/2005/22540317/12OmNxEjXXP", "parentPublication": { "id": "proceedings/wpc/2005/2254/0", "title": "Proceedings. 13th International Workshop on Program Comprehension", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2016/0252/0/07739657", "title": "Label management: Keeping complex diagrams usable", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2016/07739657/12OmNy9Priu", "parentPublication": { "id": "proceedings/vlhcc/2016/0252/0", "title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seke/1992/2830/0/00227922", "title": "Automatic layout of diagrams for software specification", "doi": null, "abstractUrl": "/proceedings-article/seke/1992/00227922/12OmNz2TCuu", "parentPublication": { "id": "proceedings/seke/1992/2830/0", "title": "Proceedings Fourth International Conference on Software Engineering and Knowledge Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2018/5663/0/566301a396", "title": "Poster: How Do Modelers Read UML Diagrams? 
Preliminary Results from an Eye-Tracking Study", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2018/566301a396/13bd1gFCjrK", "parentPublication": { "id": "proceedings/icse-companion/2018/5663/0", "title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/1986/04/06312901", "title": "A layout algorithm for data flow diagrams", "doi": null, "abstractUrl": "/journal/ts/1986/04/06312901/13rRUwhpBPd", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBZYToK", "title": "2016 International Conference on Frontiers of Information Technology (FIT)", "acronym": "fit", "groupId": "1800803", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNCf1DwX", "doi": "10.1109/FIT.2016.053", "title": "Scene Completion Using Top-1 Similar Image", "normalizedTitle": "Scene Completion Using Top-1 Similar Image", "abstract": "Scene Completion is an interesting Image Processing problem that has recently been studied in the context of data, i.e. by using large repositories of data. One of the requirements for such a data intensive approach is that the completion has to be done without human intervention. This is rather challenging as it may not be clear that what could be the most suitable image for the completion purpose in the data repository that potentially contains millions of images. We propose a methodology for finding the top-1 image in a data repository that could be the best candidate for scene completion. We do so by computing a representative set of features namely Gist, Texture and Colour, and then give an algorithm for scene completion. To obtain the top-1 image, we consider a ranking scheme that satisfies the value-invariance property and thus, is not affected by the individual feature scores. The scene completion algorithm completes the input image with the constraint that the completion has to be seamless. The approach is data-driven and there is no need of labelling by the user. Although, the completion process is automated, we also allow the user to select a completion image from the top-k matches in order to have a completion that is semantically valid. 
The experimental results show that we are able to find a matching image that is able to complete the input image seamlessly.", "abstracts": [ { "abstractType": "Regular", "content": "Scene Completion is an interesting Image Processing problem that has recently been studied in the context of data, i.e. by using large repositories of data. One of the requirements for such a data intensive approach is that the completion has to be done without human intervention. This is rather challenging as it may not be clear that what could be the most suitable image for the completion purpose in the data repository that potentially contains millions of images. We propose a methodology for finding the top-1 image in a data repository that could be the best candidate for scene completion. We do so by computing a representative set of features namely Gist, Texture and Colour, and then give an algorithm for scene completion. To obtain the top-1 image, we consider a ranking scheme that satisfies the value-invariance property and thus, is not affected by the individual feature scores. The scene completion algorithm completes the input image with the constraint that the completion has to be seamless. The approach is data-driven and there is no need of labelling by the user. Although, the completion process is automated, we also allow the user to select a completion image from the top-k matches in order to have a completion that is semantically valid. The experimental results show that we are able to find a matching image that is able to complete the input image seamlessly.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scene Completion is an interesting Image Processing problem that has recently been studied in the context of data, i.e. by using large repositories of data. One of the requirements for such a data intensive approach is that the completion has to be done without human intervention. 
This is rather challenging as it may not be clear that what could be the most suitable image for the completion purpose in the data repository that potentially contains millions of images. We propose a methodology for finding the top-1 image in a data repository that could be the best candidate for scene completion. We do so by computing a representative set of features namely Gist, Texture and Colour, and then give an algorithm for scene completion. To obtain the top-1 image, we consider a ranking scheme that satisfies the value-invariance property and thus, is not affected by the individual feature scores. The scene completion algorithm completes the input image with the constraint that the completion has to be seamless. The approach is data-driven and there is no need of labelling by the user. Although, the completion process is automated, we also allow the user to select a completion image from the top-k matches in order to have a completion that is semantically valid. The experimental results show that we are able to find a matching image that is able to complete the input image seamlessly.", "fno": "5300a252", "keywords": [ "Image Segmentation", "Image Color Analysis", "Image Retrieval", "Feature Extraction", "Computer Science", "Electronic Mail", "Data Intensive Scene Completion", "Scene Completion", "Top K Queries" ], "authors": [ { "affiliation": null, "fullName": "Romana Talat", "givenName": "Romana", "surname": "Talat", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Muhammad Muzammal", "givenName": "Muhammad", "surname": "Muzammal", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Imran Siddiqi", "givenName": "Imran", "surname": "Siddiqi", "__typename": "ArticleAuthorType" } ], "idPrefix": "fit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-12-01T00:00:00", "pubType": "proceedings", "pages": "252-257", "year": "2016", "issn": null, "isbn": 
"978-1-5090-5300-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5300a247", "articleId": "12OmNyaGeFv", "__typename": "AdjacentArticleType" }, "next": { "fno": "5300a258", "articleId": "12OmNyaXPTv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icicta/2011/4353/2/05750952", "title": "Image Completion Using Constrained Delaunay Triangulation", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750952/12OmNBNM8Sj", "parentPublication": { "id": "icicta/2011/4353/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d872", "title": "Reflection Removal Using Low-Rank Matrix Completion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d872/12OmNy1SFGA", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2013/6463/0/06528313", "title": "Transformation guided image completion", "doi": null, "abstractUrl": "/proceedings-article/iccp/2013/06528313/12OmNz3bdGu", "parentPublication": { "id": "proceedings/iccp/2013/6463/0", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851a488", "title": "Multiview Image Completion with Space Structure Propagation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851a488/12OmNzUgdhd", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457a190", "title": "Semantic Scene Completion from a Single Depth Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a190/12OmNzn38Ky", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/12/06853394", "title": "Image Completion Approaches Using the Statistics of Similar Patches", "doi": null, "abstractUrl": "/journal/tp/2014/12/06853394/13rRUxBJhnT", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10093076", "title": "Point Cloud Scene Completion with Joint Color and Semantic Estimation from Single RGB-D Image", "doi": null, "abstractUrl": "/journal/tp/5555/01/10093076/1M61SaJ53LG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h800", "title": "Cascaded Context Pyramid for Full-Resolution 3D Semantic Scene Completion", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h800/1hVlr89nneE", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413252", "title": "EdgeNet: Semantic Scene Completion from a Single RGB- D Image", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413252/1tmjb13jEDS", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th 
International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a324", "title": "Semantic Scene Completion via Integrating Instances and Scene in-the-Loop", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a324/1yeLH3ZxKaQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBkfRgM", "title": "Pacific-Asia Workshop on Computational Intelligence and Industrial Application, IEEE", "acronym": "paciia", "groupId": "1002599", "volume": "1", "displayVolume": "1", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNvrvjdO", "doi": "10.1109/PACIIA.2008.271", "title": "Image Completion for Overlapping Chromosomes", "normalizedTitle": "Image Completion for Overlapping Chromosomes", "abstract": "In this paper, we proposed a novel approach to image completion for overlapping chromosomes. In our system, only given missing regions, the task can be performed automatically without human intervention. We address the problem of image completion for overlapping chromosomes in the context of a discrete global optimization problem. In order to reconstruct the original chromosome image as faithfully as possible, the objective cost function of this problem is defined under constraint conditions of band patterns in chromosomes image, and corresponds to the energy of a discrete Markov random fields. For efficiently optimizing this MRF, a loopy Belief Propagation algorithm is utilized to perform it. Experiment results on input overlapping chromosomes images are presented, which demonstrate the effectiveness of our approach.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we proposed a novel approach to image completion for overlapping chromosomes. In our system, only given missing regions, the task can be performed automatically without human intervention. We address the problem of image completion for overlapping chromosomes in the context of a discrete global optimization problem. In order to reconstruct the original chromosome image as faithfully as possible, the objective cost function of this problem is defined under constraint conditions of band patterns in chromosomes image, and corresponds to the energy of a discrete Markov random fields. 
For efficiently optimizing this MRF, a loopy Belief Propagation algorithm is utilized to perform it. Experiment results on input overlapping chromosomes images are presented, which demonstrate the effectiveness of our approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we proposed a novel approach to image completion for overlapping chromosomes. In our system, only given missing regions, the task can be performed automatically without human intervention. We address the problem of image completion for overlapping chromosomes in the context of a discrete global optimization problem. In order to reconstruct the original chromosome image as faithfully as possible, the objective cost function of this problem is defined under constraint conditions of band patterns in chromosomes image, and corresponds to the energy of a discrete Markov random fields. For efficiently optimizing this MRF, a loopy Belief Propagation algorithm is utilized to perform it. Experiment results on input overlapping chromosomes images are presented, which demonstrate the effectiveness of our approach.", "fno": "3490a413", "keywords": [ "Chromosome", "Image Completion", "Markov Random Fields", "Belief Propagation" ], "authors": [ { "affiliation": null, "fullName": "Xiuzhuang Zhou", "givenName": "Xiuzhuang", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yao Lu", "givenName": "Yao", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ziye Yan", "givenName": "Ziye", "surname": "Yan", "__typename": "ArticleAuthorType" } ], "idPrefix": "paciia", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "413-417", "year": "2008", "issn": null, "isbn": "978-0-7695-3490-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { 
"previous": { "fno": "04756609", "articleId": "12OmNyOq4UN", "__typename": "AdjacentArticleType" }, "next": { "fno": "3490a418", "articleId": "12OmNClQ0y9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2011/4541/0/4541a199", "title": "Structure-Aware Image Completion with Texture Propagation", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a199/12OmNBTJIAX", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isip/2010/4261/0/4261a172", "title": "Hierarchically Clustering IDS Alarms Using a GA with Vary-lengthed Chromosomes", "doi": null, "abstractUrl": "/proceedings-article/isip/2010/4261a172/12OmNqIQS31", "parentPublication": { "id": "proceedings/isip/2010/4261/0", "title": "2010 Third International Symposium on Information Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgec/2010/4281/0/4281a094", "title": "A Novel Exemplar-Based Image Completion Scheme with Adaptive TV-Constraint", "doi": null, "abstractUrl": "/proceedings-article/icgec/2010/4281a094/12OmNrIaekd", "parentPublication": { "id": "proceedings/icgec/2010/4281/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a555", "title": "PatchMatch-Based Content Completion of Stereo Image Pairs", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a555/12OmNrkBwy5", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/broadcom/2008/3453/0/3453a186", "title": "Design of a Neural Network Classifier for Separation of Images with One Chromosome from Images with Several Chromosomes", "doi": null, "abstractUrl": "/proceedings-article/broadcom/2008/3453a186/12OmNxveNTd", "parentPublication": { "id": "proceedings/broadcom/2008/3453/0", "title": "Broadband Communications, Information Technology &amp; Biomedical Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etcs/2010/3987/1/3987a571", "title": "A Watershed Based Segmentation Method for Overlapping Chromosome Images", "doi": null, "abstractUrl": "/proceedings-article/etcs/2010/3987a571/12OmNyvY9vq", "parentPublication": { "id": "proceedings/etcs/2010/3987/1", "title": "Education Technology and Computer Science, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a305", "title": "Image Completion with Automatic Structure Propagation", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a305/12OmNzBOimX", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2005/2467/0/24670037", "title": "Image Completion from Low-Level Learning", "doi": null, "abstractUrl": "/proceedings-article/dicta/2005/24670037/12OmNzVGcH5", "parentPublication": { "id": "proceedings/dicta/2005/2467/0", "title": "Digital Image Computing: Techniques and Applications (DICTA'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmdcm/2011/4413/0/4413a203", "title": "Virtual Completion of Facial Image in Ancient Murals", "doi": null, "abstractUrl": 
"/proceedings-article/dmdcm/2011/4413a203/12OmNzt0IQR", "parentPublication": { "id": "proceedings/dmdcm/2011/4413/0", "title": "Digital Media and Digital Content Management, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1997/11/i1212", "title": "Geometric Separation of Partially Overlapping Nonrigid Objects Applied to Automatic Chromosome Classification", "doi": null, "abstractUrl": "/journal/tp/1997/11/i1212/13rRUxlgy4M", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwekjuM", "title": "9th International Conference on Pattern Recognition", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "1988", "__typename": "ProceedingType" }, "article": { "id": "12OmNwKGAop", "doi": "10.1109/ICPR.1988.28161", "title": "Occlusion-free 3D recovery using mirror images", "normalizedTitle": "Occlusion-free 3D recovery using mirror images", "abstract": "Concerns 3D reconstruction from the 2D real image formed in a camera when mirrors are used so that both the object and reflections of it are viewed by the camera. The mirrors generate symmetric relations between direct images and mirror images. Correspondences between the direct image and the mirror images are found, and the 3D shape is reconstructed by a plane symmetry recovery method using the vanishing point. The use of several mirrors eliminates the occlusion that occurs when only one mirror is used.<>", "abstracts": [ { "abstractType": "Regular", "content": "Concerns 3D reconstruction from the 2D real image formed in a camera when mirrors are used so that both the object and reflections of it are viewed by the camera. The mirrors generate symmetric relations between direct images and mirror images. Correspondences between the direct image and the mirror images are found, and the 3D shape is reconstructed by a plane symmetry recovery method using the vanishing point. The use of several mirrors eliminates the occlusion that occurs when only one mirror is used.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Concerns 3D reconstruction from the 2D real image formed in a camera when mirrors are used so that both the object and reflections of it are viewed by the camera. The mirrors generate symmetric relations between direct images and mirror images. 
Correspondences between the direct image and the mirror images are found, and the 3D shape is reconstructed by a plane symmetry recovery method using the vanishing point. The use of several mirrors eliminates the occlusion that occurs when only one mirror is used.", "fno": "00028161", "keywords": [ "Computerised Pattern Recognition", "Computerised Picture Processing", "Occlusion Free 3 D Image Recovery", "Picture Processing", "Pattern Recognition", "Image Reconstruction", "Mirror Images", "Camera", "Symmetric Relations", "Direct Images", "3 D Shape", "Plane Symmetry Recovery Method", "Vanishing Point", "Mirrors", "Image Reconstruction", "Cameras", "Image Segmentation", "Shape", "Lenses", "Focusing", "Equations", "Joining Processes" ], "authors": [ { "affiliation": "Fac. of Eng., Tottori Univ., Japan", "fullName": "K. Okazaki", "givenName": "K.", "surname": "Okazaki", "__typename": "ArticleAuthorType" }, { "affiliation": "Fac. of Eng., Tottori Univ., Japan", "fullName": "N. Kajimi", "givenName": "N.", "surname": "Kajimi", "__typename": "ArticleAuthorType" }, { "affiliation": "Fac. of Eng., Tottori Univ., Japan", "fullName": "Y. Fukui", "givenName": "Y.", "surname": "Fukui", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "S. Tamura", "givenName": "S.", "surname": "Tamura", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "H. 
Mitsumoto", "givenName": "H.", "surname": "Mitsumoto", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1988-01-01T00:00:00", "pubType": "proceedings", "pages": "17,18,19", "year": "1988", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00028160", "articleId": "12OmNyRPgMk", "__typename": "AdjacentArticleType" }, "next": { "fno": "00028162", "articleId": "12OmNy3RRzg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isot/2014/6752/0/07119404", "title": "CubeSat Deformable Mirror Demonstration Mission", "doi": null, "abstractUrl": "/proceedings-article/isot/2014/07119404/12OmNBaBuPP", "parentPublication": { "id": "proceedings/isot/2014/6752/0", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2003/2105/7/210570069", "title": "Mirror shape recovery from image curves and intrinsic parameters: Rotationally symmetric and conic mirrors", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2003/210570069/12OmNC1Y5pm", "parentPublication": { "id": "proceedings/cvprw/2003/2105/7", "title": "2003 Conference on Computer Vision and Pattern Recognition Workshop - Volume 7", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2016/4470/0/4470a660", "title": "Fast Reinforcement Learning by Mirror Images", "doi": null, "abstractUrl": "/proceedings-article/wi/2016/4470a660/12OmNqGitVN", "parentPublication": { "id": "proceedings/wi/2016/4470/0", "title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/icpr/2014/5209/0/5209c083", "title": "Line-Images in Cone Mirror Catadioptric Systems", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c083/12OmNzSh18v", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1992/09/i0941", "title": "3-D Reconstruction Using Mirror Images Based on a Plane Symmetry Recovering Method", "doi": null, "abstractUrl": "/journal/tp/1992/09/i0941/13rRUEgaroI", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f931", "title": "Learning Semantic Associations for Mirror Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f931/1H1m8sVi7e0", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956400", "title": "Scene Recognition for Blind Spot via Road Safety Mirror and In-Vehicle Camera", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956400/1IHpO3aRlgQ", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i808", "title": "Where Is My Mirror?", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i808/1hVlsCrhrfa", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d694", "title": "Progressive Mirror Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d694/1m3nPSImIEg", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09393612", "title": "Structure of Multiple Mirror System From Kaleidoscopic Projections of Single 3D Point", "doi": null, "abstractUrl": "/journal/tp/2022/09/09393612/1srMAfD4SQw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVr", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNy1SFGA", "doi": "10.1109/CVPR.2017.412", "title": "Reflection Removal Using Low-Rank Matrix Completion", "normalizedTitle": "Reflection Removal Using Low-Rank Matrix Completion", "abstract": "The images taken through glass often capture a target transmitted scene as well as undesired reflected scenes. In this paper, we propose a low-rank matrix completion algorithm to remove reflection artifacts automatically from multiple glass images taken at slightly different camera locations. We assume that the transmitted scenes are more dominant than the reflected scenes in typical glass images. We first warp the multiple glass images to a reference image, where the gradients are consistent in the transmission images while the gradients are varying across the reflection images. Based on this observation, we compute a gradient reliability such that the pixels belonging to the salient edges of the transmission image are assigned high reliability. Then we suppress the gradients of the reflection images and recover the gradients of the transmission images only, by solving a low-rank matrix completion problem in gradient domain. We reconstruct an original transmission image using the resulting optimal gradient map. Experimental results show that the proposed algorithm removes the reflection artifacts from glass images faithfully and outperforms the existing algorithms on typical glass images.", "abstracts": [ { "abstractType": "Regular", "content": "The images taken through glass often capture a target transmitted scene as well as undesired reflected scenes. 
In this paper, we propose a low-rank matrix completion algorithm to remove reflection artifacts automatically from multiple glass images taken at slightly different camera locations. We assume that the transmitted scenes are more dominant than the reflected scenes in typical glass images. We first warp the multiple glass images to a reference image, where the gradients are consistent in the transmission images while the gradients are varying across the reflection images. Based on this observation, we compute a gradient reliability such that the pixels belonging to the salient edges of the transmission image are assigned high reliability. Then we suppress the gradients of the reflection images and recover the gradients of the transmission images only, by solving a low-rank matrix completion problem in gradient domain. We reconstruct an original transmission image using the resulting optimal gradient map. Experimental results show that the proposed algorithm removes the reflection artifacts from glass images faithfully and outperforms the existing algorithms on typical glass images.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The images taken through glass often capture a target transmitted scene as well as undesired reflected scenes. In this paper, we propose a low-rank matrix completion algorithm to remove reflection artifacts automatically from multiple glass images taken at slightly different camera locations. We assume that the transmitted scenes are more dominant than the reflected scenes in typical glass images. We first warp the multiple glass images to a reference image, where the gradients are consistent in the transmission images while the gradients are varying across the reflection images. Based on this observation, we compute a gradient reliability such that the pixels belonging to the salient edges of the transmission image are assigned high reliability. 
Then we suppress the gradients of the reflection images and recover the gradients of the transmission images only, by solving a low-rank matrix completion problem in gradient domain. We reconstruct an original transmission image using the resulting optimal gradient map. Experimental results show that the proposed algorithm removes the reflection artifacts from glass images faithfully and outperforms the existing algorithms on typical glass images.", "fno": "0457d872", "keywords": [ "Cameras", "Edge Detection", "Gradient Methods", "Image Capture", "Image Reconstruction", "Matrix Algebra", "Low Rank Matrix Completion Algorithm", "Multiple Glass Images", "Reference Image", "Reflection Images", "Reflection Removal", "Salient Edges", "Transmission Image", "Glass", "Reliability", "Image Edge Detection", "Image Reconstruction", "Cameras", "Color", "Image Color Analysis" ], "authors": [ { "affiliation": null, "fullName": "Byeong-Ju Han", "givenName": "Byeong-Ju", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jae-Young Sim", "givenName": "Jae-Young", "surname": "Sim", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-07-01T00:00:00", "pubType": "proceedings", "pages": "3872-3880", "year": "2017", "issn": "1063-6919", "isbn": "978-1-5386-0457-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0457d862", "articleId": "12OmNqIQSfz", "__typename": "AdjacentArticleType" }, "next": { "fno": "0457d881", "articleId": "12OmNzahcbP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2013/2840/0/2840c432", "title": "Exploiting Reflection Change for Automatic Reflection Removal", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840c432/12OmNwoxSfk", 
"parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d942", "title": "Benchmarking Single-Image Reflection Removal Algorithms", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d942/12OmNzE54Kv", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/02/ttp2014020209", "title": "A Physically-Based Approach to Reflection Separation: From Physical Modeling to Constrained Optimization", "doi": null, "abstractUrl": "/journal/tp/2014/02/ttp2014020209/13rRUx0xQ0H", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000e777", "title": "CRRN: Multi-scale Guided Concurrent Reflection Removal Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000e777/17D45WK5Anl", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000e786", "title": "Single Image Reflection Separation with Perceptual Losses", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000e786/17D45WrVg57", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900d039", "title": "A 
Categorized Reflection Removal Dataset with Diverse Real-world Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900d039/1G56eQTuB3O", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d562", "title": "Single Image Reflection Removal Through Cascaded Refinement", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d562/1m3nZMsOIP6", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b747", "title": "Polarized Reflection Removal With Perfect Alignment in the Wild", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b747/1m3o3ONT3cQ", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700c032", "title": "Single Image Reflection Removal with Edge Guidance, Reflection Classifier, and Recurrent Decomposition", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700c032/1uqGz4hpgtO", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b886", "title": "Distilling Reflection Dynamics for Single-Image Reflection Removal", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b886/1yNhYeHIVRS", "parentPublication": { "id": 
"proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyKJiwQ", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "acronym": "iccp", "groupId": "1800125", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNz3bdGu", "doi": "10.1109/ICCPhot.2013.6528313", "title": "Transformation guided image completion", "normalizedTitle": "Transformation guided image completion", "abstract": "In this paper, we describe a new interactive image completion system that allows users to easily specify various forms of mid-level structures in the image. Our system supports the specification of four basic symmetric types: reflection, translation, rotation, and glide. The user inputs are automatically converted into guidance maps that encode possible candidate shifts and, indirectly, local transformations of rotation and scale. These guidance maps are used in conjunction with a color matching cost for image completion. We show that our system is capable of handling a variety of challenging examples.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we describe a new interactive image completion system that allows users to easily specify various forms of mid-level structures in the image. Our system supports the specification of four basic symmetric types: reflection, translation, rotation, and glide. The user inputs are automatically converted into guidance maps that encode possible candidate shifts and, indirectly, local transformations of rotation and scale. These guidance maps are used in conjunction with a color matching cost for image completion. We show that our system is capable of handling a variety of challenging examples.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we describe a new interactive image completion system that allows users to easily specify various forms of mid-level structures in the image. 
Our system supports the specification of four basic symmetric types: reflection, translation, rotation, and glide. The user inputs are automatically converted into guidance maps that encode possible candidate shifts and, indirectly, local transformations of rotation and scale. These guidance maps are used in conjunction with a color matching cost for image completion. We show that our system is capable of handling a variety of challenging examples.", "fno": "06528313", "keywords": [ "Optimization", "Shape", "Image Color Analysis", "Image Segmentation", "Surface Treatment", "Green Products", "User Interfaces" ], "authors": [ { "affiliation": "Univ. of Illinois, Urbana, IL, USA", "fullName": "J. Huang", "givenName": "J.", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "J. Kopf", "givenName": "J.", "surname": "Kopf", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of Illinois, Urbana, IL, USA", "fullName": "N. Ahuja", "givenName": "N.", "surname": "Ahuja", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "S. B. Kang", "givenName": "S. 
B.", "surname": "Kang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-04-01T00:00:00", "pubType": "proceedings", "pages": "1-9", "year": "2013", "issn": null, "isbn": "978-1-4673-6463-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06528311", "articleId": "12OmNzwZ6i8", "__typename": "AdjacentArticleType" }, "next": { "fno": "06528296", "articleId": "12OmNzd7bfq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2019/1975/0/197500b999", "title": "Shadow Patching: Guided Image Completion for Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b999/18j8EKtiIQU", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g240", "title": "GuideFormer: Transformers for Image Guided Depth Completion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g240/1H1lIhe3jva", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/05/09913676", "title": "Multi-Channel Attention Selection GANs for Guided Image-to-Image Translation", "doi": null, "abstractUrl": "/journal/tp/2023/05/09913676/1Hmg4aXUpc4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600c567", "title": "An 
Unified Framework for Language Guided Image Completion", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600c567/1KxUDF59b8Y", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10093076", "title": "Point Cloud Scene Completion with Joint Color and Semantic Estimation from Single RGB-D Image", "doi": null, "abstractUrl": "/journal/tp/5555/01/10093076/1M61SaJ53LG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv-2/2019/2850/0/285000a147", "title": "Volume Completion for Trimmed B-Reps", "doi": null, "abstractUrl": "/proceedings-article/iv-2/2019/285000a147/1cMEQrpMzQI", "parentPublication": { "id": "proceedings/iv-2/2019/2850/0", "title": "2019 23rd International Conference in Information Visualization – Part II", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c811", "title": "Depth Completion From Sparse LiDAR Data With Depth-Normal Constraints", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c811/1hQqnPwPXOM", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j015", "title": "Guided Image-to-Image Translation With Bi-Directional Feature Transformation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j015/1hVlsG2XlK0", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093407", "title": "A Multi-Scale Guided Cascade Hourglass Network for Depth Completion", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093407/1jPbC8OCq6Q", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900p5885", "title": "View-Guided Point Cloud Completion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900p5885/1yeJzxEdmPm", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzuIjee", "title": "Digital Media and Digital Content Management, Workshop on", "acronym": "dmdcm", "groupId": "1800440", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNzt0IQR", "doi": "10.1109/DMDCM.2011.20", "title": "Virtual Completion of Facial Image in Ancient Murals", "normalizedTitle": "Virtual Completion of Facial Image in Ancient Murals", "abstract": "Restore damaged face images is an important issue for ancient murals conservation and research. Since the structure and style of facial image varies with identity, dynasties, location and artists, it is a time consuming and challenging job for researchers to achieve objective and precise restoration. In this paper, we propose a novel facial image completion method based on a semantic learning framework. By few user interactions, our approach can fill missing regions with a reconstructed face which is based on candidate images with high semantic similarity. Since most of the ancient murals contain deteriorations, to archive visual pleasant result, we adopt a PCA based region blending method to produce a seamless completion. Moreover, for the criteria of mural restoration are evaluated in the view of both scientific and art, our completion process can provide the researcher an opportunity to preview a diverse set of the final results virtually before they actually start their job. The real applications in Dunhuang mural show the efficiency and effectiveness of our approach. Furthermore, these experiments suggested the potential applications for restoration facial image in ancient line drawing.", "abstracts": [ { "abstractType": "Regular", "content": "Restore damaged face images is an important issue for ancient murals conservation and research. 
Since the structure and style of facial image varies with identity, dynasties, location and artists, it is a time consuming and challenging job for researchers to achieve objective and precise restoration. In this paper, we propose a novel facial image completion method based on a semantic learning framework. By few user interactions, our approach can fill missing regions with a reconstructed face which is based on candidate images with high semantic similarity. Since most of the ancient murals contain deteriorations, to archive visual pleasant result, we adopt a PCA based region blending method to produce a seamless completion. Moreover, for the criteria of mural restoration are evaluated in the view of both scientific and art, our completion process can provide the researcher an opportunity to preview a diverse set of the final results virtually before they actually start their job. The real applications in Dunhuang mural show the efficiency and effectiveness of our approach. Furthermore, these experiments suggested the potential applications for restoration facial image in ancient line drawing.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Restore damaged face images is an important issue for ancient murals conservation and research. Since the structure and style of facial image varies with identity, dynasties, location and artists, it is a time consuming and challenging job for researchers to achieve objective and precise restoration. In this paper, we propose a novel facial image completion method based on a semantic learning framework. By few user interactions, our approach can fill missing regions with a reconstructed face which is based on candidate images with high semantic similarity. Since most of the ancient murals contain deteriorations, to archive visual pleasant result, we adopt a PCA based region blending method to produce a seamless completion. 
Moreover, for the criteria of mural restoration are evaluated in the view of both scientific and art, our completion process can provide the researcher an opportunity to preview a diverse set of the final results virtually before they actually start their job. The real applications in Dunhuang mural show the efficiency and effectiveness of our approach. Furthermore, these experiments suggested the potential applications for restoration facial image in ancient line drawing.", "fno": "4413a203", "keywords": [ "Image Completion", "In Painting", "Semantic Retrieval", "Facial Structure Reconstruction", "Ancient Murals" ], "authors": [ { "affiliation": null, "fullName": "Qi Wang", "givenName": "Qi", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dongmin Lu", "givenName": "Dongmin", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hongxin Zhang", "givenName": "Hongxin", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "dmdcm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-05-01T00:00:00", "pubType": "proceedings", "pages": "203-209", "year": "2011", "issn": null, "isbn": "978-0-7695-4413-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4413a198", "articleId": "12OmNrYlmGM", "__typename": "AdjacentArticleType" }, "next": { "fno": "4413a210", "articleId": "12OmNxFsmoZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgi/1999/0185/0/01850036", "title": "Visualizing Knowledge about Virtual Reconstructions of Ancient Architecture", "doi": null, "abstractUrl": "/proceedings-article/cgi/1999/01850036/12OmNCu4nbE", "parentPublication": { "id": "proceedings/cgi/1999/0185/0", "title": "Computer Graphics International Conference", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgec/2010/4281/0/4281a094", "title": "A Novel Exemplar-Based Image Completion Scheme with Adaptive TV-Constraint", "doi": null, "abstractUrl": "/proceedings-article/icgec/2010/4281a094/12OmNrIaekd", "parentPublication": { "id": "proceedings/icgec/2010/4281/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a555", "title": "PatchMatch-Based Content Completion of Stereo Image Pairs", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a555/12OmNrkBwy5", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paciia/2008/3490/1/3490a413", "title": "Image Completion for Overlapping Chromosomes", "doi": null, "abstractUrl": "/proceedings-article/paciia/2008/3490a413/12OmNvrvjdO", "parentPublication": { "id": "proceedings/paciia/2008/3490/1", "title": "Pacific-Asia Workshop on Computational Intelligence and Industrial Application, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a305", "title": "Image Completion with Automatic Structure Propagation", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a305/12OmNzBOimX", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2003/05/k1338", "title": "Using Hybrid Knowledge Engineering and Image Processing in Color Virtual Restoration of Ancient Murals", "doi": null, 
"abstractUrl": "/journal/tk/2003/05/k1338/13rRUILtJrh", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486504", "title": "Mural2Sketch: A Combined Line Drawing Generation Method for Ancient Mural Painting", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486504/14jQfMYohcz", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500a521", "title": "Ancient Painting to Natural Image: A New Solution for Painting Processing", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a521/18j8QuxyyWI", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrhciai/2022/9182/0/918200a242", "title": "Tomb Mural Image Enhancement based on Improved CycleGAN", "doi": null, "abstractUrl": "/proceedings-article/vrhciai/2022/918200a242/1LxfftZu1z2", "parentPublication": { "id": "proceedings/vrhciai/2022/9182/0", "title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2019/4689/0/468900b050", "title": "Research on the Application of 3D Virtual Simulation Technology in Ancient Village Restoration", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2019/468900b050/1h0FgeCRbdS", "parentPublication": { "id": "proceedings/icmcce/2019/4689/0", "title": "2019 4th International Conference on Mechanical, Control and Computer 
Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyshXRzHpK", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gysnhYMnjG", "doi": "10.1109/ISMAR-Adjunct.2019.00-36", "title": "Faithful Face Image Completion for HMD Occlusion Removal", "normalizedTitle": "Faithful Face Image Completion for HMD Occlusion Removal", "abstract": "Head-mounted-displays (HMDs) provide immersive experiences of virtual content. While being flexible, HMDs could be a hindrance for Virtual Reality (VR) applications such as VR teleconference where facial components and expressions of the user are partially occluded thus cannot be seen by others. We present an automatic face image completion solution that treats the occluded region as a hole and completes the hole with the help of an occlusion-free reference image of the same person. Given the occluded input image and an occlusion-free reference image, our method first computes head pose features from estimated facial landmarks. The head pose features, as well as images, are then fed into a generative adversarial network (GAN) to synthesize the output image. Our method can generate faithful results from various input cases and outperforms other face completion methods. It provides a light-weighted solution to HMD occlusion removal and has the potential to benefit VR applications.", "abstracts": [ { "abstractType": "Regular", "content": "Head-mounted-displays (HMDs) provide immersive experiences of virtual content. While being flexible, HMDs could be a hindrance for Virtual Reality (VR) applications such as VR teleconference where facial components and expressions of the user are partially occluded thus cannot be seen by others. 
We present an automatic face image completion solution that treats the occluded region as a hole and completes the hole with the help of an occlusion-free reference image of the same person. Given the occluded input image and an occlusion-free reference image, our method first computes head pose features from estimated facial landmarks. The head pose features, as well as images, are then fed into a generative adversarial network (GAN) to synthesize the output image. Our method can generate faithful results from various input cases and outperforms other face completion methods. It provides a light-weighted solution to HMD occlusion removal and has the potential to benefit VR applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Head-mounted-displays (HMDs) provide immersive experiences of virtual content. While being flexible, HMDs could be a hindrance for Virtual Reality (VR) applications such as VR teleconference where facial components and expressions of the user are partially occluded thus cannot be seen by others. We present an automatic face image completion solution that treats the occluded region as a hole and completes the hole with the help of an occlusion-free reference image of the same person. Given the occluded input image and an occlusion-free reference image, our method first computes head pose features from estimated facial landmarks. The head pose features, as well as images, are then fed into a generative adversarial network (GAN) to synthesize the output image. Our method can generate faithful results from various input cases and outperforms other face completion methods. 
It provides a light-weighted solution to HMD occlusion removal and has the potential to benefit VR applications.", "fno": "476500a251", "keywords": [ "Face Recognition", "Feature Extraction", "Helmet Mounted Displays", "Neural Nets", "Pose Estimation", "Virtual Reality", "HMD Occlusion Removal", "Head Mounted Displays", "Immersive Experiences", "Virtual Content", "Virtual Reality Applications", "VR Teleconference", "Facial Components", "Occluded Region", "Occlusion Free Reference Image", "Occluded Input Image", "VR Applications", "Facial Landmarks", "Automatic Face Image Completion", "Head Pose Features", "Generative Adversarial Network", "Face", "Resists", "Generative Adversarial Networks", "Generators", "Hardware", "Gallium Nitride", "Face Completion", "Virtual Reality", "Generative Adversarial Networks" ], "authors": [ { "affiliation": "Beihang University", "fullName": "Miao Wang", "givenName": "Miao", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University", "fullName": "Xin Wen", "givenName": "Xin", "surname": "Wen", "__typename": "ArticleAuthorType" }, { "affiliation": "Tsinghua University", "fullName": "Shi-Min Hu", "givenName": "Shi-Min", "surname": "Hu", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "251-256", "year": "2019", "issn": null, "isbn": "978-1-7281-4765-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "476500a245", "articleId": "1gysiY6ymKQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "476500a257", "articleId": "1gysnSDS8Wk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671828", "title": "View management for driver assistance in an HMD", 
"doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671828/12OmNAmVH7U", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642365", "title": "VR Exploration Assistance through Automatic Occlusion Removal", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642365/17PYEj2mz9Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2022/9774/0/977400a167", "title": "Attention based Occlusion Removal for Hybrid Telepresence Systems", "doi": null, "abstractUrl": "/proceedings-article/crv/2022/977400a167/1GeCvG8dPna", "parentPublication": { "id": "proceedings/crv/2022/9774/0", "title": "2022 19th Conference on Robots and Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798025", "title": "Occlusion Management in VR: A Comparative Study", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798025/1cJ1f6V69wY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797959", "title": "Human Face Reconstruction under a HMD 
Occlusion", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797959/1cJ1hGY1wCk", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300k0061", "title": "Face De-Occlusion Using 3D Morphable Model and Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300k0061/1hVlK979JGo", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300a661", "title": "Complete Moving Object Detection in the Context of Robust Subspace Learning", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300a661/1i5mHdPOacM", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102788", "title": "Occlusion-Aware GAN for Face De-Occlusion in the Wild", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102788/1kwqZfMSDIc", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a109", "title": "Generative RGB-D Face Completion for Head-Mounted Display Removal", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a109/1tnXncnHsIg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops 
(VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3neAYB13G", "doi": "10.1109/CVPR42600.2020.01132", "title": "From Depth What Can You See? Depth Completion via Auxiliary Image Reconstruction", "normalizedTitle": "From Depth What Can You See? Depth Completion via Auxiliary Image Reconstruction", "abstract": "Depth completion recovers dense depth from sparse measurements, e.g., LiDAR. Existing depth-only methods use sparse depth as the only input. However, these methods may fail to recover semantics consistent boundaries, or small/thin objects due to 1) the sparse nature of depth points and 2) the lack of images to provide semantic cues. This paper continues this line of research and aims to overcome the above shortcomings. The unique design of our depth completion model is that it simultaneously outputs a reconstructed image and a dense depth map. Specifically, we formulate image reconstruction from sparse depth as an auxiliary task during training that is supervised by the unlabelled gray-scale images. During testing, our system accepts sparse depth as the only input, i.e., the image is not required. Our design allows the depth completion network to learn complementary image features that help to better understand object structures. The extra supervision incurred by image reconstruction is minimal, because no annotations other than the image are needed. We evaluate our method on the KITTI depth completion benchmark and show that depth completion can be significantly improved via the auxiliary supervision of image reconstruction. 
Our algorithm consistently outperforms depth-only methods and is also effective for indoor scenes like NYUv2.", "abstracts": [ { "abstractType": "Regular", "content": "Depth completion recovers dense depth from sparse measurements, e.g., LiDAR. Existing depth-only methods use sparse depth as the only input. However, these methods may fail to recover semantics consistent boundaries, or small/thin objects due to 1) the sparse nature of depth points and 2) the lack of images to provide semantic cues. This paper continues this line of research and aims to overcome the above shortcomings. The unique design of our depth completion model is that it simultaneously outputs a reconstructed image and a dense depth map. Specifically, we formulate image reconstruction from sparse depth as an auxiliary task during training that is supervised by the unlabelled gray-scale images. During testing, our system accepts sparse depth as the only input, i.e., the image is not required. Our design allows the depth completion network to learn complementary image features that help to better understand object structures. The extra supervision incurred by image reconstruction is minimal, because no annotations other than the image are needed. We evaluate our method on the KITTI depth completion benchmark and show that depth completion can be significantly improved via the auxiliary supervision of image reconstruction. Our algorithm consistently outperforms depth-only methods and is also effective for indoor scenes like NYUv2.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Depth completion recovers dense depth from sparse measurements, e.g., LiDAR. Existing depth-only methods use sparse depth as the only input. However, these methods may fail to recover semantics consistent boundaries, or small/thin objects due to 1) the sparse nature of depth points and 2) the lack of images to provide semantic cues. 
This paper continues this line of research and aims to overcome the above shortcomings. The unique design of our depth completion model is that it simultaneously outputs a reconstructed image and a dense depth map. Specifically, we formulate image reconstruction from sparse depth as an auxiliary task during training that is supervised by the unlabelled gray-scale images. During testing, our system accepts sparse depth as the only input, i.e., the image is not required. Our design allows the depth completion network to learn complementary image features that help to better understand object structures. The extra supervision incurred by image reconstruction is minimal, because no annotations other than the image are needed. We evaluate our method on the KITTI depth completion benchmark and show that depth completion can be significantly improved via the auxiliary supervision of image reconstruction. Our algorithm consistently outperforms depth-only methods and is also effective for indoor scenes like NYUv2.", "fno": "716800l1303", "keywords": [ "Feature Extraction", "Image Reconstruction", "Auxiliary Image Reconstruction", "Depth Completion Model", "Dense Depth Map", "Gray Scale Images", "Complementary Image Features", "Image Reconstruction", "Task Analysis", "Semantics", "Feature Extraction", "Training", "Testing", "Image Segmentation" ], "authors": [ { "affiliation": "The Australian National University; Data61, CSIRO", "fullName": "Kaiyue Lu", "givenName": "Kaiyue", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "The Australian National University", "fullName": "Nick Barnes", "givenName": "Nick", "surname": "Barnes", "__typename": "ArticleAuthorType" }, { "affiliation": "Data61, CSIRO; The Australian National University", "fullName": "Saeed Anwar", "givenName": "Saeed", "surname": "Anwar", "__typename": "ArticleAuthorType" }, { "affiliation": "The Australian National University", "fullName": "Liang Zheng", "givenName": "Liang", "surname": 
"Zheng", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "11303-11312", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800l1293", "articleId": "1m3nPpo0loY", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800l1313", "articleId": "1m3o3dtV2QU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2018/8425/0/842500a052", "title": "Sparse and Dense Data with CNNs: Depth Completion and Semantic Segmentation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a052/17D45W1Oa5Q", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2022/5824/0/582400a063", "title": "Depth Completion Auto-Encoder", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2022/582400a063/1B12ytwOKgo", "parentPublication": { "id": "proceedings/wacvw/2022/5824/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956621", "title": "Selective Progressive Learning for Sparse Depth Completion", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956621/1IHpfHNNvS8", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956653", "title": "Depth 
Completion via A Dual-Fusion Method", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956653/1IHq0aJyZLG", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f807", "title": "SIUNet: Sparsity Invariant U-Net for Edge-Aware Depth Completion", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f807/1L8qmMNw9X2", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300b070", "title": "Indoor Depth Completion with Boundary Consistency and Self-Attention", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300b070/1i5mm6MV2ZW", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093407", "title": "A Multi-Scale Guided Cascade Hourglass Network for Depth Completion", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093407/1jPbC8OCq6Q", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093349", "title": "Depth Completion via Deep Basis Fitting", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093349/1jPbCGTkPf2", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900c190", "title": "DeepDNet: Deep Dense Network for Depth Completion Task", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900c190/1yXsP1ybj9u", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1073", "title": "Sparse Auxiliary Networks for Unified Monocular Depth Prediction and Completion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1073/1yeJkffxNYI", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgX5", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNCbCrZ3", "doi": "10.1109/ICCV.2009.5459301", "title": "Tracking in unstructured crowded scenes", "normalizedTitle": "Tracking in unstructured crowded scenes", "abstract": "This paper presents a target tracking framework for unstructured crowded scenes. Unstructured crowded scenes are defined as those scenes where the motion of a crowd appears to be random with different participants moving in different directions over time. This means each spatial location in such scenes supports more than one, or multi-modal, crowd behavior. The case of tracking in structured crowded scenes, where the crowd moves coherently in a common direction, and the direction of motion does not vary over time, was previously handled in. In this work, we propose to model various crowd behavior (or motion) modalities at different locations of the scene by employing Correlated Topic Model (CTM) of. In our construction, words correspond to low level quantized motion features and topics correspond to crowd behaviors. It is then assumed that motion at each location in an unstructured crowd scene is generated by a set of behavior proportions, where behaviors represent distributions over low-level motion features. This way any one location in the scene may support multiple crowd behavior modalities and can be used as prior information for tracking. Our approach enables us to model a diverse set of unstructured crowd domains, which range from cluttered time-lapse microscopy videos of cell populations in vitro, to footage of crowded sporting events.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a target tracking framework for unstructured crowded scenes. 
Unstructured crowded scenes are defined as those scenes where the motion of a crowd appears to be random with different participants moving in different directions over time. This means each spatial location in such scenes supports more than one, or multi-modal, crowd behavior. The case of tracking in structured crowded scenes, where the crowd moves coherently in a common direction, and the direction of motion does not vary over time, was previously handled in. In this work, we propose to model various crowd behavior (or motion) modalities at different locations of the scene by employing Correlated Topic Model (CTM) of. In our construction, words correspond to low level quantized motion features and topics correspond to crowd behaviors. It is then assumed that motion at each location in an unstructured crowd scene is generated by a set of behavior proportions, where behaviors represent distributions over low-level motion features. This way any one location in the scene may support multiple crowd behavior modalities and can be used as prior information for tracking. Our approach enables us to model a diverse set of unstructured crowd domains, which range from cluttered time-lapse microscopy videos of cell populations in vitro, to footage of crowded sporting events.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a target tracking framework for unstructured crowded scenes. Unstructured crowded scenes are defined as those scenes where the motion of a crowd appears to be random with different participants moving in different directions over time. This means each spatial location in such scenes supports more than one, or multi-modal, crowd behavior. The case of tracking in structured crowded scenes, where the crowd moves coherently in a common direction, and the direction of motion does not vary over time, was previously handled in. 
In this work, we propose to model various crowd behavior (or motion) modalities at different locations of the scene by employing Correlated Topic Model (CTM) of. In our construction, words correspond to low level quantized motion features and topics correspond to crowd behaviors. It is then assumed that motion at each location in an unstructured crowd scene is generated by a set of behavior proportions, where behaviors represent distributions over low-level motion features. This way any one location in the scene may support multiple crowd behavior modalities and can be used as prior information for tracking. Our approach enables us to model a diverse set of unstructured crowd domains, which range from cluttered time-lapse microscopy videos of cell populations in vitro, to footage of crowded sporting events.", "fno": "05459301", "keywords": [ "Target Tracking", "Video Signal Processing", "Unstructured Crowded Scenes", "Target Tracking Framework", "Spatial Location", "Crowd Behavior Proportions", "Correlated Topic Model", "CTM", "Motion Features", "Time Lapse Microscopy Videos", "Cell Populations", "Layout", "Videos", "Legged Locomotion", "Target Tracking", "Robot Vision Systems", "Computer Vision", "Microscopy", "In Vitro", "Rail Transportation", "Airports" ], "authors": [ { "affiliation": "Computer Vision Lab, University of Central Florida, USA", "fullName": "Mikel Rodriguez", "givenName": "Mikel", "surname": "Rodriguez", "__typename": "ArticleAuthorType" }, { "affiliation": "Robotics Institute, Carnegie Mellon University, USA", "fullName": "Saad Ali", "givenName": "Saad", "surname": "Ali", "__typename": "ArticleAuthorType" }, { "affiliation": "Robotics Institute, Carnegie Mellon University, USA", "fullName": "Takeo Kanade", "givenName": "Takeo", "surname": "Kanade", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-09-01T00:00:00", "pubType": 
"proceedings", "pages": "1389-1396", "year": "2009", "issn": "1550-5499", "isbn": "978-1-4244-4420-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05459217", "articleId": "12OmNqIhFYN", "__typename": "AdjacentArticleType" }, "next": { "fno": "05459297", "articleId": "12OmNwpXRZb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2010/6984/0/05540149", "title": "Tracking with local spatio-temporal motion patterns in extremely crowded scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05540149/12OmNAkWvm8", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109e064", "title": "Learning Major Pedestrian Flows in Crowded Scenes", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109e064/12OmNwHz05o", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109d533", "title": "Detecting Dominant Motion Flows in Unstructured/Structured Crowd Scenes", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d533/12OmNweTvM1", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05539872", "title": "Anomaly detection in crowded scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05539872/12OmNwpoFCv", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE 
Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d137", "title": "Pedestrian Travel Time Estimation in Crowded Scenes", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d137/12OmNxFJXMy", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209c203", "title": "Anomaly Detection through Spatio-temporal Context Modeling in Crowded Scenes", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c203/12OmNzd7bDq", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/05/05989832", "title": "Tracking Pedestrians Using Local Spatio-Temporal Motion Patterns in Extremely Crowded Scenes", "doi": null, "abstractUrl": "/journal/tp/2012/05/05989832/13rRUxcsYNa", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200m2777", "title": "DnD: Dense Depth Estimation in Crowded Dynamic Indoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2777/1BmL6MiYHcI", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600b465", "title": "Learning to Estimate Robust 3D Human Mesh from In-the-Wild Crowded Scenes", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2022/694600b465/1H1kkMKDubS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600a847", "title": "Progressive End-to-End Object Detection in Crowded Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600a847/1H1kz34dDpu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrkjVqA", "title": "2015 IEEE Scientific Visualization Conference (SciVis)", "acronym": "scivis", "groupId": "1811924", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNrYlmLV", "doi": "10.1109/SciVis.2015.7429505", "title": "Real-time interactive time correction on the GPU", "normalizedTitle": "Real-time interactive time correction on the GPU", "abstract": "The study of physical phenomena and their dynamic evolution is supported by the analysis and visualization of time-enabled data. In many applications, available data are sparsely distributed in the space-time domain, which leads to incomprehensible visualizations. We present an interactive approach for the dynamic tracking and visualization of measured data particles through advection in a simulated flow. We introduce a fully GPU-based technique for efficient spatio-temporal interpolation, using a kd-tree forest for acceleration. As the user interacts with the system using a time slider, particle positions are reconstructed for the time selected by the user. Our results show that the proposed technique achieves highly accurate parallel tracking for thousands of particles. The rendering performance is mainly affected by the size of the query set.", "abstracts": [ { "abstractType": "Regular", "content": "The study of physical phenomena and their dynamic evolution is supported by the analysis and visualization of time-enabled data. In many applications, available data are sparsely distributed in the space-time domain, which leads to incomprehensible visualizations. We present an interactive approach for the dynamic tracking and visualization of measured data particles through advection in a simulated flow. We introduce a fully GPU-based technique for efficient spatio-temporal interpolation, using a kd-tree forest for acceleration. 
As the user interacts with the system using a time slider, particle positions are reconstructed for the time selected by the user. Our results show that the proposed technique achieves highly accurate parallel tracking for thousands of particles. The rendering performance is mainly affected by the size of the query set.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The study of physical phenomena and their dynamic evolution is supported by the analysis and visualization of time-enabled data. In many applications, available data are sparsely distributed in the space-time domain, which leads to incomprehensible visualizations. We present an interactive approach for the dynamic tracking and visualization of measured data particles through advection in a simulated flow. We introduce a fully GPU-based technique for efficient spatio-temporal interpolation, using a kd-tree forest for acceleration. As the user interacts with the system using a time slider, particle positions are reconstructed for the time selected by the user. Our results show that the proposed technique achieves highly accurate parallel tracking for thousands of particles. 
The rendering performance is mainly affected by the size of the query set.", "fno": "07429505", "keywords": [ "Data Visualization", "Arrays", "Graphics Processing Units", "Satellites", "Interpolation", "Atmospheric Measurements", "Extraterrestrial Measurements", "Applications", "I 3 8 COMPUTER GRAPHICS" ], "authors": [ { "affiliation": "Virginia Tech", "fullName": "Mai Elshehaly", "givenName": "Mai", "surname": "Elshehaly", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech", "fullName": "Denis Gračanin", "givenName": "Denis", "surname": "Gračanin", "__typename": "ArticleAuthorType" }, { "affiliation": "Ain Shams University", "fullName": "Mohamed Gad", "givenName": "Mohamed", "surname": "Gad", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech", "fullName": "Junpeng Wang", "givenName": "Junpeng", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Alexandria University", "fullName": "Hicham G. Elmongui", "givenName": "Hicham G.", "surname": "Elmongui", "__typename": "ArticleAuthorType" } ], "idPrefix": "scivis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-10-01T00:00:00", "pubType": "proceedings", "pages": "145-146", "year": "2015", "issn": null, "isbn": "978-1-4673-9785-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07429504", "articleId": "12OmNrIaemh", "__typename": "AdjacentArticleType" }, "next": { "fno": "07429506", "articleId": "12OmNy3AgD2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2011/4589/0/4589a333", "title": "TSF-Slider: Combining Time- and Structure-Based Media Navigation in One Navigation Component", "doi": null, "abstractUrl": "/proceedings-article/ism/2011/4589a333/12OmNqBtiTb", "parentPublication": { "id": "proceedings/ism/2011/4589/0", "title": "2011 IEEE 
International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartcomp/2014/5711/0/07043840", "title": "Evaluation of PM2.5 and PM10 using normalized first-order absolute sum of high-frequency spectrum", "doi": null, "abstractUrl": "/proceedings-article/smartcomp/2014/07043840/12OmNx8fi8I", "parentPublication": { "id": "proceedings/smartcomp/2014/5711/0", "title": "2014 International Conference on Smart Computing (SMARTCOMP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1996/864/0/00568150", "title": "Interactive exploration and modeling of large data sets: a case study with Venus light scattering data", "doi": null, "abstractUrl": "/proceedings-article/visual/1996/00568150/12OmNxEBzho", "parentPublication": { "id": "proceedings/visual/1996/864/0", "title": "Proceedings of Seventh Annual IEEE Visualization '96", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1990/2083/0/00146391", "title": "The application of transport theory to visualization of 3D scalar data fields", "doi": null, "abstractUrl": "/proceedings-article/visual/1990/00146391/12OmNxHryfi", "parentPublication": { "id": "proceedings/visual/1990/2083/0", "title": "1990 First IEEE Conference on Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2014/6513/0/6513a652", "title": "On the Application and Performance of MongoDB for Climate Satellite Data", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2014/6513a652/12OmNxyDZdE", "parentPublication": { "id": "proceedings/trustcom/2014/6513/0", "title": "2014 IEEE 13th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ispan-fcst-iscc/2017/0840/0/0840a352", "title": "Particle-Cell Detecting and Tracking in Live-Cell Time-Lapse Images", "doi": null, "abstractUrl": "/proceedings-article/ispan-fcst-iscc/2017/0840a352/12OmNzxgHvm", "parentPublication": { "id": "proceedings/ispan-fcst-iscc/2017/0840/0", "title": "2017 14th International Symposium on Pervasive Systems, Algorithms and Networks & 2017 11th International Conference on Frontier of Computer Science and Technology & 2017 Third International Symposium of Creative Computing (ISPAN-FCST-ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061579", "title": "Hypothesis Generation in Climate Research with Interactive Visual Data Exploration", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061579/13rRUxlgy3y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a051", "title": "Real-time Auditory Feedback System for Bow-tilt Correction while Aiming in Archery", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a051/1J6hEwB72mc", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150747", "title": "Coarse-to-Fine Hamiltonian Dynamics of Hierarchical Flows in Computational Anatomy", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150747/1lPGZlTvO4E", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2021/02/09222377", "title": "Interactive Visualization of Atmospheric Effects for Celestial Bodies", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222377/1nTrHSFagNy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCmpcNk", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNzYeAMV", "doi": "10.1109/VISUAL.2005.1532825", "title": "Marching diamonds for unstructured meshes", "normalizedTitle": "Marching diamonds for unstructured meshes", "abstract": "We present a higher-order approach to the extraction of isosurfaces from unstructured meshes. Existing methods use linear interpolation along each mesh edge to find isosurface intersections. In contrast, our method determines intersections by performing barycentric interpolation over diamonds formed by the tetrahedra incident to each edge. Our method produces smoother, more accurate isosurfaces. Additionally, interpolating over diamonds, rather than linearly interpolating edge endpoints. enables us to identify up to two isosurface intersections per edge. This paper details how our new technique extracts isopoints, and presents a simple connection strategy for forming a triangle mesh isosurface.", "abstracts": [ { "abstractType": "Regular", "content": "We present a higher-order approach to the extraction of isosurfaces from unstructured meshes. Existing methods use linear interpolation along each mesh edge to find isosurface intersections. In contrast, our method determines intersections by performing barycentric interpolation over diamonds formed by the tetrahedra incident to each edge. Our method produces smoother, more accurate isosurfaces. Additionally, interpolating over diamonds, rather than linearly interpolating edge endpoints. enables us to identify up to two isosurface intersections per edge. 
This paper details how our new technique extracts isopoints, and presents a simple connection strategy for forming a triangle mesh isosurface.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a higher-order approach to the extraction of isosurfaces from unstructured meshes. Existing methods use linear interpolation along each mesh edge to find isosurface intersections. In contrast, our method determines intersections by performing barycentric interpolation over diamonds formed by the tetrahedra incident to each edge. Our method produces smoother, more accurate isosurfaces. Additionally, interpolating over diamonds, rather than linearly interpolating edge endpoints. enables us to identify up to two isosurface intersections per edge. This paper details how our new technique extracts isopoints, and presents a simple connection strategy for forming a triangle mesh isosurface.", "fno": "01532825", "keywords": [ "Computational Geometry", "Mesh Generation", "Interpolation", "Image Representation", "Marching Diamonds", "Unstructured Mesh", "Isosurface Extraction", "Linear Interpolation Mesh Edge Endpoints", "Isosurface Intersection", "Barycentric Interpolation", "Triangle Mesh Isosurface", "Isosurfaces", "Interpolation", "Data Analysis", "Data Visualization", "Computer Science", "Aircraft", "Data Mining", "Chromium", "Computer Graphics", "Surface Cracks" ], "authors": [ { "affiliation": "Comput. Sci. Dept., California Univ., Davis, CA, USA", "fullName": "J.C. Anderson", "givenName": "J.C.", "surname": "Anderson", "__typename": "ArticleAuthorType" }, { "affiliation": "Comput. Sci. Dept., California Univ., Davis, CA, USA", "fullName": "J.C. Bennett", "givenName": "J.C.", "surname": "Bennett", "__typename": "ArticleAuthorType" }, { "affiliation": "Comput. Sci. Dept., California Univ., Davis, CA, USA", "fullName": "K.I. 
Joy", "givenName": "K.I.", "surname": "Joy", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-01-01T00:00:00", "pubType": "proceedings", "pages": "423,424,425,426,427,428,429", "year": "2005", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "27660039", "articleId": "12OmNAoUTua", "__typename": "AdjacentArticleType" }, "next": { "fno": "27660040", "articleId": "12OmNAfy7Ky", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2004/8788/0/87880489", "title": "Dual Marching Cubes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880489/12OmNAWpynS", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2002/7498/0/7498weber", "title": "Exploring Scalar Fields Using Critical Isovalues", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2002/7498weber/12OmNAYXWzS", "parentPublication": { "id": "proceedings/ieee-vis/2002/7498/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660054", "title": "Marching Diamonds for Unstructured Meshes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660054/12OmNC8dgmy", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2002/7498/0/7498balmelli", "title": "Volume Warping for Adaptive Isosurface Extraction", "doi": null, "abstractUrl": 
"/proceedings-article/ieee-vis/2002/7498balmelli/12OmNqFJhHx", "parentPublication": { "id": "proceedings/ieee-vis/2002/7498/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2004/2078/0/20780019", "title": "Chord Length (Motivated) Parameterization of Marching Cubes IsoSurfaces", "doi": null, "abstractUrl": "/proceedings-article/gmp/2004/20780019/12OmNxYbT1o", "parentPublication": { "id": "proceedings/gmp/2004/2078/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300009", "title": "MC*: Star Functions for Marching Cubes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300009/12OmNyprnzr", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270244", "title": "Progressive Isosurface Extraction from Tetrahedral Meshes", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270244/12OmNzaQoEM", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/04/ttg2010040583", "title": "Isodiamond Hierarchies: An Efficient Multiresolution Representation for Isosurfaces and Interval Volumes", "doi": null, "abstractUrl": "/journal/tg/2010/04/ttg2010040583/13rRUNvgz4c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061198", "title": "On the Fractal Dimension of Isosurfaces", "doi": null, "abstractUrl": 
"/journal/tg/2010/06/ttg2010061198/13rRUxcsYLL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440120", "title": "Exploring Time-Varying Multivariate Volume Data Using Matrix of Isosurface Similarity Maps", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440120/17D45Wuc38E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1grOETjaFP2", "title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)", "acronym": "ldav", "groupId": "1800568", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1grOEWdpXq0", "doi": "10.1109/LDAV48142.2019.8944355", "title": "A Lifeline-Based Approach for Work Requesting and Parallel Particle Advection", "normalizedTitle": "A Lifeline-Based Approach for Work Requesting and Parallel Particle Advection", "abstract": "Particle advection, a fundamental building block for many flow visualization algorithms, is very difficult to parallelize efficiently. That said, work requesting is a promising technique to improve parallel performance for particle advection. With this work, we introduce a new work requesting-based method which uses the Lifeline scheduling method. To evaluate the impact of this new algorithm, we ran 92 experiments, running at concurrencies as high as 8192 cores, data sets as large as 17 billion cells, and as many as 16 million particles, comparing against other work requesting scheduling methods. Overall, our results show that Lifeline has significantly less idle time than other approaches, since it reduces the number of failed attempts to request work.", "abstracts": [ { "abstractType": "Regular", "content": "Particle advection, a fundamental building block for many flow visualization algorithms, is very difficult to parallelize efficiently. That said, work requesting is a promising technique to improve parallel performance for particle advection. With this work, we introduce a new work requesting-based method which uses the Lifeline scheduling method. To evaluate the impact of this new algorithm, we ran 92 experiments, running at concurrencies as high as 8192 cores, data sets as large as 17 billion cells, and as many as 16 million particles, comparing against other work requesting scheduling methods. 
Overall, our results show that Lifeline has significantly less idle time than other approaches, since it reduces the number of failed attempts to request work.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Particle advection, a fundamental building block for many flow visualization algorithms, is very difficult to parallelize efficiently. That said, work requesting is a promising technique to improve parallel performance for particle advection. With this work, we introduce a new work requesting-based method which uses the Lifeline scheduling method. To evaluate the impact of this new algorithm, we ran 92 experiments, running at concurrencies as high as 8192 cores, data sets as large as 17 billion cells, and as many as 16 million particles, comparing against other work requesting scheduling methods. Overall, our results show that Lifeline has significantly less idle time than other approaches, since it reduces the number of failed attempts to request work.", "fno": "08944355", "keywords": [ "Computational Fluid Dynamics", "Data Visualisation", "Flow Visualisation", "Parallel Processing", "Scheduling", "Parallel Particle Advection", "Flow Visualization Algorithms", "Parallel Performance", "Work Requesting Based Method", "Lifeline Based Approach", "Lifeline Scheduling Method", "Trajectory", "Processor Scheduling", "Arrays", "Supercomputers", "Scheduling", "Work Requesting", "Visualization", "Parallel Particle Advection", "Lifeline Based Scheduling" ], "authors": [ { "affiliation": "University of Oregon", "fullName": "Roba Binyahib", "givenName": "Roba", "surname": "Binyahib", "__typename": "ArticleAuthorType" }, { "affiliation": "Oak Ridge National Laboratory", "fullName": "David Pugmire", "givenName": "David", "surname": "Pugmire", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Oregon", "fullName": "Boyana Norris", "givenName": "Boyana", "surname": "Norris", "__typename": "ArticleAuthorType" }, { "affiliation": "University of 
Oregon", "fullName": "Hank Childs", "givenName": "Hank", "surname": "Childs", "__typename": "ArticleAuthorType" } ], "idPrefix": "ldav", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "52-61", "year": "2019", "issn": null, "isbn": "978-1-7281-2605-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08944383", "articleId": "1grOFHDQFQk", "__typename": "AdjacentArticleType" }, "next": { "fno": "08944365", "articleId": "1grOFAcAvtu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sc/2012/0806/0/1000a066", "title": "Characterizing and mitigating work time inflation in task parallel programs", "doi": null, "abstractUrl": "/proceedings-article/sc/2012/1000a066/12OmNARiM6N", "parentPublication": { "id": "proceedings/sc/2012/0806/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2012/0806/0/1000a102", "title": "Parallel particle advection and FTLE computation for time-varying flow fields", "doi": null, "abstractUrl": "/proceedings-article/sc/2012/1000a102/12OmNBp52zQ", "parentPublication": { "id": "proceedings/sc/2012/0806/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2013/1659/0/06675152", "title": "Distributed parallel particle advection using work requesting", "doi": null, "abstractUrl": "/proceedings-article/ldav/2013/06675152/12OmNCuVaA5", "parentPublication": { "id": "proceedings/ldav/2013/1659/0", "title": "2013 IEEE Symposium on Large-Scale Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipc/2014/5976/0/07116900", "title": "Particle 
advection performance over varied architectures and workloads", "doi": null, "abstractUrl": "/proceedings-article/hipc/2014/07116900/12OmNwCaCqV", "parentPublication": { "id": "proceedings/hipc/2014/5976/0", "title": "2014 21st International Conference on High Performance Computing (HiPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2012/4733/0/06378974", "title": "Parallel stream surface computation for large data sets", "doi": null, "abstractUrl": "/proceedings-article/ldav/2012/06378974/12OmNx0RIZL", "parentPublication": { "id": "proceedings/ldav/2012/4733/0", "title": "2012 IEEE Symposium on Large Data Analysis and Visualization (LDAV 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2016/3682/0/3682b058", "title": "Tuned to Terrible: A Study of Parallel Particle Advection State of the Practice", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2016/3682b058/12OmNxT56De", "parentPublication": { "id": "proceedings/ipdpsw/2016/3682/0", "title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/shpcc/1994/5680/0/00296680", "title": "Parallel semi-Lagrangian advection on the sphere using PVM", "doi": null, "abstractUrl": "/proceedings-article/shpcc/1994/00296680/12OmNxV4iyH", "parentPublication": { "id": "proceedings/shpcc/1994/5680/0", "title": "Proceedings of IEEE Scalable High Performance Computing Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wetice/2014/4249/0/4249a173", "title": "Evaluation of Particle Swarm Optimization Applied to Grid Scheduling", "doi": null, "abstractUrl": "/proceedings-article/wetice/2014/4249a173/12OmNzcxZ6C", "parentPublication": { "id": "proceedings/wetice/2014/4249/0", "title": "2014 IEEE 23rd 
International Workshops on Enabling Technologies: Infrastructures for Collaborative Enterprise (WETICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvris/2018/8031/0/803100a099", "title": "Cloud Computing Task Scheduling Policy Based on Improved Particle Swarm Optimization", "doi": null, "abstractUrl": "/proceedings-article/icvris/2018/803100a099/17D45XH89pO", "parentPublication": { "id": "proceedings/icvris/2018/8031/0", "title": "2018 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2019/2605/0/08944378", "title": "Parallel Particle Advection and Lagrangian Analysis for 3D-PLI Fiber Orientation Maps", "doi": null, "abstractUrl": "/proceedings-article/ldav/2019/08944378/1grOF71jiog", "parentPublication": { "id": "proceedings/ldav/2019/2605/0", "title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1oqKBBaHDqM", "title": "2020 IEEE International Conference on Cluster Computing (CLUSTER)", "acronym": "cluster", "groupId": "1000095", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1oqKCTxaDBe", "doi": "10.1109/CLUSTER49012.2020.00048", "title": "Parallel Particle Advection Bake-Off for Scientific Visualization Workloads", "normalizedTitle": "Parallel Particle Advection Bake-Off for Scientific Visualization Workloads", "abstract": "There are multiple algorithms for parallelizing particle advection for scientific visualization workloads. While many previous studies have contributed to the understanding of individual algorithms, our study aims to provide a holistic understanding of how algorithms perform relative to each other on various workloads. To accomplish this, we consider four popular parallelization algorithms and run a &#x201C;bake-off&#x201D; study (i.e., an empirical study) to identify the best matches for each. The study includes 216 tests, going to a concurrency of up to 8192 cores and considering data sets as large as 34 billion cells with 300 million particles. Overall, our study informs three important research questions: (1) which parallelization algorithms perform best for a given workload?, (2) why?, and (3) what are the unsolved problems in parallel particle advection? In terms of findings, we find that the seeding box is the most important factor in choosing the best algorithm, and also that there is a significant opportunity for improvement in execution time, scalability, and efficiency.", "abstracts": [ { "abstractType": "Regular", "content": "There are multiple algorithms for parallelizing particle advection for scientific visualization workloads. While many previous studies have contributed to the understanding of individual algorithms, our study aims to provide a holistic understanding of how algorithms perform relative to each other on various workloads. 
To accomplish this, we consider four popular parallelization algorithms and run a &#x201C;bake-off&#x201D; study (i.e., an empirical study) to identify the best matches for each. The study includes 216 tests, going to a concurrency of up to 8192 cores and considering data sets as large as 34 billion cells with 300 million particles. Overall, our study informs three important research questions: (1) which parallelization algorithms perform best for a given workload?, (2) why?, and (3) what are the unsolved problems in parallel particle advection? In terms of findings, we find that the seeding box is the most important factor in choosing the best algorithm, and also that there is a significant opportunity for improvement in execution time, scalability, and efficiency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "There are multiple algorithms for parallelizing particle advection for scientific visualization workloads. While many previous studies have contributed to the understanding of individual algorithms, our study aims to provide a holistic understanding of how algorithms perform relative to each other on various workloads. To accomplish this, we consider four popular parallelization algorithms and run a “bake-off” study (i.e., an empirical study) to identify the best matches for each. The study includes 216 tests, going to a concurrency of up to 8192 cores and considering data sets as large as 34 billion cells with 300 million particles. Overall, our study informs three important research questions: (1) which parallelization algorithms perform best for a given workload?, (2) why?, and (3) what are the unsolved problems in parallel particle advection? 
In terms of findings, we find that the seeding box is the most important factor in choosing the best algorithm, and also that there is a significant opportunity for improvement in execution time, scalability, and efficiency.", "fno": "667700a381", "keywords": [ "Data Visualisation", "Parallel Algorithms", "Parallelization Algorithms", "Scientific Visualization Workloads", "Parallel Particle Advection Bake Off", "Concurrent Computing", "Scalability", "Conferences", "Clustering Algorithms", "Cluster Computing", "Scientific Visualization", "Particle Advection", "Flow Visualization", "Parallel Processing" ], "authors": [ { "affiliation": "University of Oregon", "fullName": "Roba Binyahib", "givenName": "Roba", "surname": "Binyahib", "__typename": "ArticleAuthorType" }, { "affiliation": "Oak Ridge National Laboratory", "fullName": "David Pugmire", "givenName": "David", "surname": "Pugmire", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Oregon", "fullName": "Abhishek Yenpure", "givenName": "Abhishek", "surname": "Yenpure", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Oregon", "fullName": "Hank Childs", "givenName": "Hank", "surname": "Childs", "__typename": "ArticleAuthorType" } ], "idPrefix": "cluster", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-09-01T00:00:00", "pubType": "proceedings", "pages": "381-391", "year": "2020", "issn": null, "isbn": "978-1-7281-6677-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "667700a371", "articleId": "1oqKGdiElAQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "667700a392", "articleId": "1oqKDFZKHq8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/mpcs/1994/6322/0/00367045", "title": "Experimental evaluation of affine schedules for matrix multiplication on the 
MasPar architecture", "doi": null, "abstractUrl": "/proceedings-article/mpcs/1994/00367045/12OmNClQ0vp", "parentPublication": { "id": "proceedings/mpcs/1994/6322/0", "title": "Proceedings of the First International Conference on Massively Parallel Computing Systems (MPCS) The Challenges of General-Purpose and Special-Purpose Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2013/1659/0/06675152", "title": "Distributed parallel particle advection using work requesting", "doi": null, "abstractUrl": "/proceedings-article/ldav/2013/06675152/12OmNCuVaA5", "parentPublication": { "id": "proceedings/ldav/2013/1659/0", "title": "2013 IEEE Symposium on Large-Scale Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipc/2014/5976/0/07116900", "title": "Particle advection performance over varied architectures and workloads", "doi": null, "abstractUrl": "/proceedings-article/hipc/2014/07116900/12OmNwCaCqV", "parentPublication": { "id": "proceedings/hipc/2014/5976/0", "title": "2014 21st International Conference on High Performance Computing (HiPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/shpcc/1994/5680/0/00296721", "title": "Scalable execution control of grid-based scientific applications on parallel systems", "doi": null, "abstractUrl": "/proceedings-article/shpcc/1994/00296721/12OmNwMobcR", "parentPublication": { "id": "proceedings/shpcc/1994/5680/0", "title": "Proceedings of IEEE Scalable High Performance Computing Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2016/3682/0/3682b058", "title": "Tuned to Terrible: A Study of Parallel Particle Advection State of the Practice", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2016/3682b058/12OmNxT56De", "parentPublication": { "id": 
"proceedings/ipdpsw/2016/3682/0", "title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2010/6533/0/05470761", "title": "Solving the advection PDE on the cell broadband engine", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2010/05470761/12OmNzmclkk", "parentPublication": { "id": "proceedings/ipdpsw/2010/6533/0", "title": "2010 IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/08/07243356", "title": "Fast Coherent Particle Advection through Time-Varying Unstructured Flow Datasets", "doi": null, "abstractUrl": "/journal/tg/2016/08/07243356/13rRUx0xPIN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2019/2605/0/08944355", "title": "A Lifeline-Based Approach for Work Requesting and Parallel Particle Advection", "doi": null, "abstractUrl": "/proceedings-article/ldav/2019/08944355/1grOEWdpXq0", "parentPublication": { "id": "proceedings/ldav/2019/2605/0", "title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2019/2605/0/08944378", "title": "Parallel Particle Advection and Lagrangian Analysis for 3D-PLI Fiber Orientation Maps", "doi": null, "abstractUrl": "/proceedings-article/ldav/2019/08944378/1grOF71jiog", "parentPublication": { "id": "proceedings/ldav/2019/2605/0", "title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cluster/2021/9666/0/966600a542", "title": "Optimizing Barrier Synchronization on ARMv8 Many-Core Architectures", "doi": null, "abstractUrl": "/proceedings-article/cluster/2021/966600a542/1xFuXyOqFDq", "parentPublication": { "id": "proceedings/cluster/2021/9666/0", "title": "2021 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbU3aP", "title": "2009 WRI Global Congress on Intelligent Systems", "acronym": "gcis", "groupId": "1002842", "volume": "2", "displayVolume": "2", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNAqkSES", "doi": "10.1109/GCIS.2009.33", "title": "Multiphase Segmentation of SAR Images with Level Set Evolution", "normalizedTitle": "Multiphase Segmentation of SAR Images with Level Set Evolution", "abstract": "Segmentation is a fundamental problem for the automatic interpretation of synthetic aperture radar (SAR) images. We propose an improved multiphase segmentation method for SAR images based on Chan-Vese level set evolution model. The costly re-initialization procedure of signed distance function in Chan-Vese model is eliminated through introducing a penalty term. The proposed method has two advantages over traditional multiphase segmentation scheme with level set evolution. First, regions with close gray scale could be segmented into different class correctly by utilizing several binary segmentations of irregular regions. Second, falsely segmented fractions with one pixel width on the edges of uniform regions are eliminated with morphological open operation. The numerical algorithm using finite differences is also presented, which has been applied to simulated images and real SAR images with more promising results.", "abstracts": [ { "abstractType": "Regular", "content": "Segmentation is a fundamental problem for the automatic interpretation of synthetic aperture radar (SAR) images. We propose an improved multiphase segmentation method for SAR images based on Chan-Vese level set evolution model. The costly re-initialization procedure of signed distance function in Chan-Vese model is eliminated through introducing a penalty term. The proposed method has two advantages over traditional multiphase segmentation scheme with level set evolution. 
First, regions with close gray scale could be segmented into different class correctly by utilizing several binary segmentations of irregular regions. Second, falsely segmented fractions with one pixel width on the edges of uniform regions are eliminated with morphological open operation. The numerical algorithm using finite differences is also presented, which has been applied to simulated images and real SAR images with more promising results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Segmentation is a fundamental problem for the automatic interpretation of synthetic aperture radar (SAR) images. We propose an improved multiphase segmentation method for SAR images based on Chan-Vese level set evolution model. The costly re-initialization procedure of signed distance function in Chan-Vese model is eliminated through introducing a penalty term. The proposed method has two advantages over traditional multiphase segmentation scheme with level set evolution. First, regions with close gray scale could be segmented into different class correctly by utilizing several binary segmentations of irregular regions. Second, falsely segmented fractions with one pixel width on the edges of uniform regions are eliminated with morphological open operation. 
The numerical algorithm using finite differences is also presented, which has been applied to simulated images and real SAR images with more promising results.", "fno": "3571b447", "keywords": [ "SAR Image", "Segmentation", "Active Contour", "Level Set", "Snake" ], "authors": [ { "affiliation": null, "fullName": "Wang Xiaoliang", "givenName": "Wang", "surname": "Xiaoliang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Li Chunsheng", "givenName": "Li", "surname": "Chunsheng", "__typename": "ArticleAuthorType" } ], "idPrefix": "gcis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-05-01T00:00:00", "pubType": "proceedings", "pages": "447-452", "year": "2009", "issn": null, "isbn": "978-0-7695-3571-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3571b441", "articleId": "12OmNqGA58C", "__typename": "AdjacentArticleType" }, "next": { "fno": "3571b453", "articleId": "12OmNxTEiQo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/snpd/2016/2239/0/07515903", "title": "Image segmentation based on local Chan-Vese model optimized by max-flow algorithm", "doi": null, "abstractUrl": "/proceedings-article/snpd/2016/07515903/12OmNA1DMmZ", "parentPublication": { "id": "proceedings/snpd/2016/2239/0", "title": "2016 17th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270429", "title": "Multiphase Segmentation of Deformation using Logarithmic Priors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270429/12OmNAlvI0e", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer 
Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinis/2008/3391/0/3391a507", "title": "An Improved C-V Image Segmentation Method Based on Level Set Model", "doi": null, "abstractUrl": "/proceedings-article/icinis/2008/3391a507/12OmNqGitU2", "parentPublication": { "id": "proceedings/icinis/2008/3391/0", "title": "Intelligent Networks and Intelligent Systems, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2012/4858/0/4858a778", "title": "A Fast Target Detection Method for SAR Image", "doi": null, "abstractUrl": "/proceedings-article/cit/2012/4858a778/12OmNqJq4sw", "parentPublication": { "id": "proceedings/cit/2012/4858/0", "title": "Computer and Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/his/2009/3745/1/3745a243", "title": "Shape-Based Level Set Method for Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/his/2009/3745a243/12OmNxWcH4M", "parentPublication": { "id": "proceedings/his/2009/3745/1", "title": "Hybrid Intelligent Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2013/5050/0/5050a201", "title": "Improved Vese-Chan Model for Fast Image Segmentation Based on Split Bregman Method", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a201/12OmNxaNGm4", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336d106", "title": "The Improvement of C-V Level Set Method for Image Segmentation", "doi": null, "abstractUrl": 
"/proceedings-article/csse/2008/3336d106/12OmNyuPLkD", "parentPublication": { "id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mvhi/2010/4009/0/4009a006", "title": "Color Image Segmentation Based on a New Geometric Active Contour Model", "doi": null, "abstractUrl": "/proceedings-article/mvhi/2010/4009a006/12OmNyyO8OO", "parentPublication": { "id": "proceedings/mvhi/2010/4009/0", "title": "Machine Vision and Human-machine Interface, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mue/2009/3658/0/3658a041", "title": "An Innovative Variational Level Set Model for Multiphase Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/mue/2009/3658a041/12OmNz3bdCk", "parentPublication": { "id": "proceedings/mue/2009/3658/0", "title": "Multimedia and Ubiquitous Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/10/ttp2012102046", "title": "SAR Image Segmentation Based on Level Set Approach and {\\cal G}_A^0 Model", "doi": null, "abstractUrl": "/journal/tp/2012/10/ttp2012102046/13rRUxBa5d6", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }