data dict |
|---|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdbzCNHUc",
"doi": "10.1109/VRW55335.2022.00070",
"title": "My Eyes Hurt: Effects of Jitter in 3D Gaze Tracking",
"normalizedTitle": "My Eyes Hurt: Effects of Jitter in 3D Gaze Tracking",
"abstract": "Jitter, small fluctuations in the signal, is one of the major sources for a decrease in motor performance and a negative user experience in virtual reality (VR) systems. Current technologies still cannot eliminate jitter in VR systems, especially in the eye-gaze tracking systems embedded in many head-mounted displays. In this work, we used an HTC Vive Pro Eye, artificially added 0.5°, 1°, and 1.5° jitter to the eye-tracking data, and analyzed user performance in an ISO 9241: 411 pointing task with targets at 1 or 2 meters visual distance using angular Fitts' law. The results showed that the user's error rate significantly increases with increased jitter levels. No significant difference was observed for time and throughput. Additionally, we observed a significant decrease in performance in terms of time, error rate, and accuracy for the more distant targets. We hope that our results guide researchers, practitioners, and developers towards better gaze-tracking-based VR applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Jitter, small fluctuations in the signal, is one of the major sources for a decrease in motor performance and a negative user experience in virtual reality (VR) systems. Current technologies still cannot eliminate jitter in VR systems, especially in the eye-gaze tracking systems embedded in many head-mounted displays. In this work, we used an HTC Vive Pro Eye, artificially added 0.5°, 1°, and 1.5° jitter to the eye-tracking data, and analyzed user performance in an ISO 9241: 411 pointing task with targets at 1 or 2 meters visual distance using angular Fitts' law. The results showed that the user's error rate significantly increases with increased jitter levels. No significant difference was observed for time and throughput. Additionally, we observed a significant decrease in performance in terms of time, error rate, and accuracy for the more distant targets. We hope that our results guide researchers, practitioners, and developers towards better gaze-tracking-based VR applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Jitter, small fluctuations in the signal, is one of the major sources for a decrease in motor performance and a negative user experience in virtual reality (VR) systems. Current technologies still cannot eliminate jitter in VR systems, especially in the eye-gaze tracking systems embedded in many head-mounted displays. In this work, we used an HTC Vive Pro Eye, artificially added 0.5°, 1°, and 1.5° jitter to the eye-tracking data, and analyzed user performance in an ISO 9241: 411 pointing task with targets at 1 or 2 meters visual distance using angular Fitts' law. The results showed that the user's error rate significantly increases with increased jitter levels. No significant difference was observed for time and throughput. Additionally, we observed a significant decrease in performance in terms of time, error rate, and accuracy for the more distant targets. We hope that our results guide researchers, practitioners, and developers towards better gaze-tracking-based VR applications.",
"fno": "840200a310",
"keywords": [
"Gaze Tracking",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Jitter",
"Virtual Reality",
"3 D Gaze Tracking",
"Motor Performance",
"Negative User Experience",
"Virtual Reality Systems",
"VR Systems",
"Eye Gaze Tracking Systems",
"Head Mounted Displays",
"HTC Vive Pro Eye",
"Eye Tracking Data",
"User Performance",
"Angular Fitts Law",
"Jitter Levels",
"Gaze Tracking Based VR Applications",
"ISO 9241 411 Pointing Task",
"Performance Evaluation",
"Visualization",
"Three Dimensional Displays",
"Error Analysis",
"Conferences",
"Gaze Tracking",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Kadir Has University,Department of Mechatronics Engineering",
"fullName": "Moaaz Hudhud Mughrabi",
"givenName": "Moaaz Hudhud",
"surname": "Mughrabi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Interactive Arts & Technology, Simon Fraser University",
"fullName": "Aunnoy K Mutasim",
"givenName": "Aunnoy K",
"surname": "Mutasim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Interactive Arts & Technology, Simon Fraser University",
"fullName": "Wolfgang Stuerzlinger",
"givenName": "Wolfgang",
"surname": "Stuerzlinger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kadir Has University,Department of Mechatronics Engineering",
"fullName": "Anil Ufuk Batmaz",
"givenName": "Anil Ufuk",
"surname": "Batmaz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "310-315",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a304",
"articleId": "1CJetSxfyi4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a316",
"articleId": "1CJdGDnvHjy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699248",
"title": "DualGaze: Addressing the Midas Touch Problem in Gaze Mediated VR Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699248/19F1R5RaLFS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a832",
"title": "GazeDock: Gaze-Only Menu Selection in Virtual Reality using Auto-Triggering Peripheral Menu",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a832/1CJbR6qnKdW",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a399",
"title": "Real-Time Gaze Tracking with Event-Driven Eye Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a399/1CJbTrAdAju",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a170",
"title": "Development and evaluation of car training system using VR and eye tracking technology",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a170/1GU75yVJubS",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a787",
"title": "VRDoc: Gaze-based Interactions for VR Reading Experience",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798273",
"title": "Required Accuracy of Gaze Tracking for Varifocal Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798273/1cJ0T4CUJTq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089578",
"title": "Exploring Eye Gaze Visualization Techniques for Identifying Distracted Students in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089578/1jIxfimnIaY",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389490",
"title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1FUUlAQhJwk",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"acronym": "icceai",
"groupId": "1843184",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1FUVGuLyjra",
"doi": "10.1109/ICCEAI55464.2022.00172",
"title": "Trolley Remote Control System Using Eye Motion Tracking",
"normalizedTitle": "Trolley Remote Control System Using Eye Motion Tracking",
"abstract": "In this paper, a trolley remote control system based on eye tracking technology is proposed that controls the direction of the trolley's movement according to the gaze direction of the human eye. The system's graphical user interface contains eight arrows pointing in different directions that function as the gaze targets for moving the trolley in the corresponding direction. This paper compares two algorithms for detecting fixation points. One method clusters the gaze position of the human eye recorded by the eye tracker in the preset time window, and the other method directly uses the fixation point determined by the eye tracker's own software to construct a fixation heat map and then perform image processing. Through experiments, it was found that the average target detection accuracy of the system using the clustering algorithm (99.78%) is 3.13% higher than that of the system using the second method and the difference between the performances of the algorithms was found to be significant. The results demonstrate that in the trolley remote control scenario, the use of eye tracking technology with a clustering algorithm can realize efficient and natural human-computer interaction. Moreover, such a system has good sensitivity and stability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a trolley remote control system based on eye tracking technology is proposed that controls the direction of the trolley's movement according to the gaze direction of the human eye. The system's graphical user interface contains eight arrows pointing in different directions that function as the gaze targets for moving the trolley in the corresponding direction. This paper compares two algorithms for detecting fixation points. One method clusters the gaze position of the human eye recorded by the eye tracker in the preset time window, and the other method directly uses the fixation point determined by the eye tracker's own software to construct a fixation heat map and then perform image processing. Through experiments, it was found that the average target detection accuracy of the system using the clustering algorithm (99.78%) is 3.13% higher than that of the system using the second method and the difference between the performances of the algorithms was found to be significant. The results demonstrate that in the trolley remote control scenario, the use of eye tracking technology with a clustering algorithm can realize efficient and natural human-computer interaction. Moreover, such a system has good sensitivity and stability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a trolley remote control system based on eye tracking technology is proposed that controls the direction of the trolley's movement according to the gaze direction of the human eye. The system's graphical user interface contains eight arrows pointing in different directions that function as the gaze targets for moving the trolley in the corresponding direction. This paper compares two algorithms for detecting fixation points. One method clusters the gaze position of the human eye recorded by the eye tracker in the preset time window, and the other method directly uses the fixation point determined by the eye tracker's own software to construct a fixation heat map and then perform image processing. Through experiments, it was found that the average target detection accuracy of the system using the clustering algorithm (99.78%) is 3.13% higher than that of the system using the second method and the difference between the performances of the algorithms was found to be significant. The results demonstrate that in the trolley remote control scenario, the use of eye tracking technology with a clustering algorithm can realize efficient and natural human-computer interaction. Moreover, such a system has good sensitivity and stability.",
"fno": "680300a826",
"keywords": [
"Brain Computer Interfaces",
"Gaze Tracking",
"Graphical User Interfaces",
"Human Computer Interaction",
"Image Processing",
"Object Detection",
"Pattern Clustering",
"Telecontrol",
"Trolleys",
"Gaze Position Clustering",
"Human Computer Interaction",
"Image Processing",
"Fixation Point Detection",
"Graphical User Interface",
"Clustering Algorithm",
"Target Detection Accuracy",
"Fixation Heat Map",
"Human Eye Gaze Direction",
"Eye Tracking Technology",
"Eye Motion Tracking",
"Trolley Remote Control System",
"Heating Systems",
"Target Tracking",
"Smoothing Methods",
"Image Processing",
"Software Algorithms",
"Clustering Algorithms",
"Gaze Tracking",
"Eye Tracking",
"Brain Computer Interface",
"DBSCAN",
"Gaussian Smoothing",
"Node Communication"
],
"authors": [
{
"affiliation": "School of Biological Science and Medical Engineering, Southeast University,Ministry of Education, Key Laboratory of Child Development and Learning Science,Nanjing,China",
"fullName": "Shuying Rao",
"givenName": "Shuying",
"surname": "Rao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Biological Science and Medical Engineering, Southeast University,Ministry of Education, Key Laboratory of Child Development and Learning Science,Nanjing,China",
"fullName": "Zichen Kong",
"givenName": "Zichen",
"surname": "Kong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Biological Science and Medical Engineering, Southeast University,Ministry of Education, Key Laboratory of Child Development and Learning Science,Nanjing,China",
"fullName": "Wenli Lan",
"givenName": "Wenli",
"surname": "Lan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Biological Science and Medical Engineering, Southeast University,Ministry of Education, Key Laboratory of Child Development and Learning Science,Nanjing,China",
"fullName": "Hui Yang",
"givenName": "Hui",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Biological Science and Medical Engineering, Southeast University,Ministry of Education, Key Laboratory of Child Development and Learning Science,Nanjing,China",
"fullName": "Yue Leng",
"givenName": "Yue",
"surname": "Leng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Biological Science and Medical Engineering, Southeast University,Ministry of Education, Key Laboratory of Child Development and Learning Science,Nanjing,China",
"fullName": "Sheng Ge",
"givenName": "Sheng",
"surname": "Ge",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icceai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "826-830",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6803-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "680300a821",
"articleId": "1FUUrudrk76",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "680300a831",
"articleId": "1FUVJMZncB2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/imis/2015/8873/0/8873a431",
"title": "Precise Exposure Control for Efficient Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2015/8873a431/12OmNARiM0m",
"parentPublication": {
"id": "proceedings/imis/2015/8873/0",
"title": "2015 9th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2012/4899/0/4899a167",
"title": "Detection of Gaze States: Fixation or Motion",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2012/4899a167/12OmNAtK4r1",
"parentPublication": {
"id": "proceedings/icdh/2012/4899/0",
"title": "4th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a048",
"title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscmi/2015/9819/0/9819a126",
"title": "Predicting Consumer's Behavior Using Eye Tracking Data",
"doi": null,
"abstractUrl": "/proceedings-article/iscmi/2015/9819a126/12OmNzt0IGz",
"parentPublication": {
"id": "proceedings/iscmi/2015/9819/0",
"title": "2015 Second International Conference on Soft Computing and Machine Intelligence (ISCMI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a399",
"title": "Real-Time Gaze Tracking with Event-Driven Eye Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a399/1CJbTrAdAju",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a663",
"title": "Eye-tracking-based robotic arm control system",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a663/1FUUvKoB2HS",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsme/2019/3094/0/309400a615",
"title": "Enhancing Eye Tracking of Source Code: A Specialized Fixation Filter for Source Code",
"doi": null,
"abstractUrl": "/proceedings-article/icsme/2019/309400a615/1fHlIz1I68E",
"parentPublication": {
"id": "proceedings/icsme/2019/3094/0",
"title": "2019 IEEE International Conference on Software Maintenance and Evolution (ICSME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2020/9899/0/09225144",
"title": "Exploiting EEG Signals for Eye Motion Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2020/09225144/1nWNWOWhzj2",
"parentPublication": {
"id": "proceedings/ewdts/2020/9899/0",
"title": "2020 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1Ml2g3mcpNu",
"title": "2022 7th International Conference on Multimedia Communication Technologies (ICMCT)",
"acronym": "icmct",
"groupId": "10097037",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1Ml2gBSajpS",
"doi": "10.1109/ICMCT57031.2022.00010",
"title": "Eye Positioning System for PC Based on Autostereoscopy with Android",
"normalizedTitle": "Eye Positioning System for PC Based on Autostereoscopy with Android",
"abstract": "When combining with the Autostereoscopy technology of Android mobile devices, it is necessary to use the eye tracking technology to locate the position of the eye to switch the pictures on the Android side. When the face detection mode of camera2 of Android system is used for real-time eye tracking, the arithmetic power of cell phone will be relatively low, and it will cause lag when calculating the position of eye, so this paper combines computer vision processing to put the calculation of eye tracking and positioning on the computer terminal, and uses gpu to calculate, so it can have unlimited arithmetic power and higher efficiency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When combining with the Autostereoscopy technology of Android mobile devices, it is necessary to use the eye tracking technology to locate the position of the eye to switch the pictures on the Android side. When the face detection mode of camera2 of Android system is used for real-time eye tracking, the arithmetic power of cell phone will be relatively low, and it will cause lag when calculating the position of eye, so this paper combines computer vision processing to put the calculation of eye tracking and positioning on the computer terminal, and uses gpu to calculate, so it can have unlimited arithmetic power and higher efficiency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When combining with the Autostereoscopy technology of Android mobile devices, it is necessary to use the eye tracking technology to locate the position of the eye to switch the pictures on the Android side. When the face detection mode of camera2 of Android system is used for real-time eye tracking, the arithmetic power of cell phone will be relatively low, and it will cause lag when calculating the position of eye, so this paper combines computer vision processing to put the calculation of eye tracking and positioning on the computer terminal, and uses gpu to calculate, so it can have unlimited arithmetic power and higher efficiency.",
"fno": "736200a005",
"keywords": [
"Target Tracking",
"Switches",
"Gaze Tracking",
"Position Measurement",
"Cameras",
"Real Time Systems",
"Multimedia Communication",
"Eye Tracking",
"Computer Vision",
"Data Transmission",
"Mtcnn",
"Image Switching"
],
"authors": [
{
"affiliation": "College of Computer Science and Technology. Chengdu Jincheng College Chengdu Jincheng College,Sichuan,China",
"fullName": "Ke Wang",
"givenName": "Ke",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Computer Science and Technology. Chengdu Jincheng College Chengdu Jincheng College,Sichuan,China",
"fullName": "Yu Ting Chen",
"givenName": "Yu Ting",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Computer Science and Technology. Chengdu Jincheng College Chengdu Jincheng College,Sichuan,China",
"fullName": "Zong Hai Pan",
"givenName": "Zong Hai",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Computer Science and Technology. Chengdu Jincheng College Chengdu Jincheng College,Sichuan,China",
"fullName": "Fei Li",
"givenName": "Fei",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Computer Science and Technology. Chengdu Jincheng College Chengdu Jincheng College,Sichuan,China",
"fullName": "Chun Mei Lan",
"givenName": "Chun Mei",
"surname": "Lan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "5-9",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-7362-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "736200a001",
"articleId": "1Ml2hws1aRa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "736200a010",
"articleId": "1Ml2hqurrva",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/imis/2015/8873/0/8873a431",
"title": "Precise Exposure Control for Efficient Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2015/8873a431/12OmNARiM0m",
"parentPublication": {
"id": "proceedings/imis/2015/8873/0",
"title": "2015 9th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2015/7343/0/07314288",
"title": "Challenges and Perspectives in Big Eye-Movement Data Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314288/12OmNButq1p",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etvis/2016/4731/0/07851171",
"title": "Eye tracking data in multimedia containers for instantaneous visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/etvis/2016/07851171/12OmNCmGNXy",
"parentPublication": {
"id": "proceedings/etvis/2016/4731/0",
"title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a048",
"title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2017/1003/0/1003a022",
"title": "iTraceVis: Visualizing Eye Movement Data Within Eclipse",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2017/1003a022/12OmNxWcHcx",
"parentPublication": {
"id": "proceedings/vissoft/2017/1003/0",
"title": "2017 IEEE Working Conference on Software Visualization (VISSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcity/2015/1893/0/1893a494",
"title": "An Advertisement Video Analysis System Based on Eye-Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/smartcity/2015/1893a494/12OmNzVGcJt",
"parentPublication": {
"id": "proceedings/smartcity/2015/1893/0",
"title": "2015 IEEE International Conference on Smart City/SocialCom/SustainCom (SmartCity)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-smartcity-dss/2018/6614/0/661400a306",
"title": "A New Human Eye Tracking Method Based on Tracking Module Feedback TLD Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2018/661400a306/183rAdksvlO",
"parentPublication": {
"id": "proceedings/hpcc-smartcity-dss/2018/6614/0",
"title": "2018 IEEE 20th International Conference on High Performance Computing and Communications; IEEE 16th International Conference on Smart City; IEEE 4th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a826",
"title": "Trolley Remote Control System Using Eye Motion Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a826/1FUVGuLyjra",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049755",
"title": "Leveling the Playing Field: A Comparative Reevaluation of Unmodified Eye Tracking as an Input and Interaction Modality for VR",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049755/1KYoozDk3v2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2021/4065/0/406500a188",
"title": "ML-based classification of eye movement patterns during reading using eye tracking data from an Apple iPad device: Perspective machine learning algorithm needed for reading quality analytics app on an iPad with built-in eye tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2021/406500a188/1yBF5P8nrXy",
"parentPublication": {
"id": "proceedings/cw/2021/4065/0",
"title": "2021 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxoACmybu",
"doi": "10.1109/VRW50115.2020.00029",
"title": "A Methodology of Eye Gazing Attention Determination for VR Training",
"normalizedTitle": "A Methodology of Eye Gazing Attention Determination for VR Training",
"abstract": "Many systems have successfully used virtual reality (VR), eye tracking system and applied behavioral analysis in Autism Spectrum Disorders (ASD) therapies. Recognizing eye gazing patterns is critical. When people look at different objects, their eyes may stay on some objects for a long time or glance at some objects. This paper proposes a methodology of eye gazing attention determination which uses different time thresholds to track the eyes pattern of autistic children in adaptive virtual environments therapy systems. Moreover, by setting up hierarchical structure of virtual objects, we increase the reliability and immersion of the eye gaze attention determination methodology in VR attention training.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many systems have successfully used virtual reality (VR), eye tracking system and applied behavioral analysis in Autism Spectrum Disorders (ASD) therapies. Recognizing eye gazing patterns is critical. When people look at different objects, their eyes may stay on some objects for a long time or glance at some objects. This paper proposes a methodology of eye gazing attention determination which uses different time thresholds to track the eyes pattern of autistic children in adaptive virtual environments therapy systems. Moreover, by setting up hierarchical structure of virtual objects, we increase the reliability and immersion of the eye gaze attention determination methodology in VR attention training.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many systems have successfully used virtual reality (VR), eye tracking system and applied behavioral analysis in Autism Spectrum Disorders (ASD) therapies. Recognizing eye gazing patterns is critical. When people look at different objects, their eyes may stay on some objects for a long time or glance at some objects. This paper proposes a methodology of eye gazing attention determination which uses different time thresholds to track the eyes pattern of autistic children in adaptive virtual environments therapy systems. Moreover, by setting up hierarchical structure of virtual objects, we increase the reliability and immersion of the eye gaze attention determination methodology in VR attention training.",
"fno": "09090559",
"keywords": [
"Training",
"Gaze Tracking",
"Virtual Environments",
"Medical Treatment",
"Autism",
"Resists",
"Eye Tracking System",
"Virtual Reality",
"Autism Spectrum Disorders ASD",
"Time Thresholds",
"H 52 Information Interfaces And Presentation",
"User Interfaces Evaluation Methodology"
],
"authors": [
{
"affiliation": "Kennesaw State University",
"fullName": "Jingjing Zhang",
"givenName": "Jingjing",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kennesaw State University",
"fullName": "Meg’n Mullikin",
"givenName": "Meg’n",
"surname": "Mullikin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kennesaw State University",
"fullName": "Yi Li",
"givenName": "Yi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kennesaw State University",
"fullName": "Chao Mei",
"givenName": "Chao",
"surname": "Mei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "138-141",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090563",
"articleId": "1jIxutrshaw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090485",
"articleId": "1jIxqx9VfS8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mobicase/2014/024/0/07026291",
"title": "Eye contact reminder system for people with autism",
"doi": null,
"abstractUrl": "/proceedings-article/mobicase/2014/07026291/12OmNzaQodl",
"parentPublication": {
"id": "proceedings/mobicase/2014/024/0",
"title": "2014 6th International Conference on Mobile Computing, Applications and Services (MobiCASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2014/03/06851182",
"title": "Joint Attention Simulation Using Eye-Tracking and Virtual Humans",
"doi": null,
"abstractUrl": "/journal/ta/2014/03/06851182/13rRUxE04s0",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a493",
"title": "Eye Tracking-based LSTM for Locomotion Prediction in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a493/1CJcrKWnUtO",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798327",
"title": "Eye-gaze-triggered Visual Cues to Restore Attention in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798327/1cJ0HmmdfUY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797896",
"title": "Pedagogical Agent Responsive to Eye Tracking in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797896/1cJ1ceQVCtG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a422",
"title": "Behavior Analysis of Indoor Escape Route-Finding Based on Head-Mounted VR and Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a422/1ehBGoaPHhK",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a707",
"title": "[DC] Eye Fixation Forecasting in Task-Oriented Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a707/1tnWQmeJsZi",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2021/4121/0/412100a142",
"title": "Classification of Autism Spectrum Disorder Severity Using Eye Tracking Data Based on Visual Attention Model",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2021/412100a142/1vb8SsuZlrG",
"parentPublication": {
"id": "proceedings/cbms/2021/4121/0",
"title": "2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tmhi3ly74c",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tmjH1aA4dG",
"doi": "10.1109/ICPR48806.2021.9412066",
"title": "Detection and Correspondence Matching of Corneal Reflections for Eye Tracking Using Deep Learning",
"normalizedTitle": "Detection and Correspondence Matching of Corneal Reflections for Eye Tracking Using Deep Learning",
"abstract": "Eye tracking systems that estimate the point-of-gaze are essential in extended reality (XR) systems as they enable new interaction paradigms and technological improvements. It is important for these systems to maintain accuracy when the headset moves relative to the head (known as device slippage) due to head movements or user adjustment. One of the most accurate eye tracking techniques, which is also insensitive to shifts of the system relative to the head, uses two or more infrared (IR) light emitting diodes to illuminate the eye and an IR camera to capture images of the eye. An essential step in estimating the point-of-gaze in these systems is the precise determination of the location of two or more corneal reflections (virtual images of the IR-LEDs that illuminate the eye) in images of the eye. Eye trackers tend to have multiple light sources to ensure at least one pair of reflections for each gaze position. The use of multiple light sources introduces a difficult problem: the need to match the corneal reflections with the corresponding light source over the range of expected eye movements. Corneal reflection detection and matching often fail in XR systems due to the proximity of camera and steep illumination angles of light sources with respect to the eye. The failures are caused by corneal reflections having varying shape and intensity levels or disappearance due to rotation of the eye, or the presence of spurious reflections. We have developed a fully convolutional neural network, based on the UNET architecture, that solves the detection and matching problem in the presence of spurious and missing reflections. Eye images of 25 people were collected in a virtual reality headset using a binocular eye tracking module consisting of five infrared light sources per eye. A set of 4,000 eye images were manually labelled for each of the corneal reflections, and data augmentation was used to generate a dataset of 40,000 images. \nThe network is able to correctly identify and match 91% of corneal reflections present in the test set. This is comparable to a state-of-the-art deep learning system, but our approach requires 33 times less memory and executes 10 times faster. The proposed algorithm, when used in an eye tracker in a VR system, achieved an average mean absolute gaze error of 1°. This is a significant improvement over the state-of-the-art learning-based XR eye tracking systems that have reported gaze errors of 2-3°.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Eye tracking systems that estimate the point-of-gaze are essential in extended reality (XR) systems as they enable new interaction paradigms and technological improvements. It is important for these systems to maintain accuracy when the headset moves relative to the head (known as device slippage) due to head movements or user adjustment. One of the most accurate eye tracking techniques, which is also insensitive to shifts of the system relative to the head, uses two or more infrared (IR) light emitting diodes to illuminate the eye and an IR camera to capture images of the eye. An essential step in estimating the point-of-gaze in these systems is the precise determination of the location of two or more corneal reflections (virtual images of the IR-LEDs that illuminate the eye) in images of the eye. Eye trackers tend to have multiple light sources to ensure at least one pair of reflections for each gaze position. The use of multiple light sources introduces a difficult problem: the need to match the corneal reflections with the corresponding light source over the range of expected eye movements. Corneal reflection detection and matching often fail in XR systems due to the proximity of camera and steep illumination angles of light sources with respect to the eye. The failures are caused by corneal reflections having varying shape and intensity levels or disappearance due to rotation of the eye, or the presence of spurious reflections. We have developed a fully convolutional neural network, based on the UNET architecture, that solves the detection and matching problem in the presence of spurious and missing reflections. Eye images of 25 people were collected in a virtual reality headset using a binocular eye tracking module consisting of five infrared light sources per eye. A set of 4,000 eye images were manually labelled for each of the corneal reflections, and data augmentation was used to generate a dataset of 40,000 images. \nThe network is able to correctly identify and match 91% of corneal reflections present in the test set. This is comparable to a state-of-the-art deep learning system, but our approach requires 33 times less memory and executes 10 times faster. The proposed algorithm, when used in an eye tracker in a VR system, achieved an average mean absolute gaze error of 1°. This is a significant improvement over the state-of-the-art learning-based XR eye tracking systems that have reported gaze errors of 2-3°.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Eye tracking systems that estimate the point-of-gaze are essential in extended reality (XR) systems as they enable new interaction paradigms and technological improvements. It is important for these systems to maintain accuracy when the headset moves relative to the head (known as device slippage) due to head movements or user adjustment. One of the most accurate eye tracking techniques, which is also insensitive to shifts of the system relative to the head, uses two or more infrared (IR) light emitting diodes to illuminate the eye and an IR camera to capture images of the eye. An essential step in estimating the point-of-gaze in these systems is the precise determination of the location of two or more corneal reflections (virtual images of the IR-LEDs that illuminate the eye) in images of the eye. Eye trackers tend to have multiple light sources to ensure at least one pair of reflections for each gaze position. The use of multiple light sources introduces a difficult problem: the need to match the corneal reflections with the corresponding light source over the range of expected eye movements. Corneal reflection detection and matching often fail in XR systems due to the proximity of camera and steep illumination angles of light sources with respect to the eye. The failures are caused by corneal reflections having varying shape and intensity levels or disappearance due to rotation of the eye, or the presence of spurious reflections. We have developed a fully convolutional neural network, based on the UNET architecture, that solves the detection and matching problem in the presence of spurious and missing reflections. Eye images of 25 people were collected in a virtual reality headset using a binocular eye tracking module consisting of five infrared light sources per eye. A set of 4,000 eye images were manually labelled for each of the corneal reflections, and data augmentation was used to generate a dataset of 40,000 images. \nThe network is able to correctly identify and match 91% of corneal reflections present in the test set. This is comparable to a state-of-the-art deep learning system, but our approach requires 33 times less memory and executes 10 times faster. The proposed algorithm, when used in an eye tracker in a VR system, achieved an average mean absolute gaze error of 1°. This is a significant improvement over the state-of-the-art learning-based XR eye tracking systems that have reported gaze errors of 2-3°.",
"fno": "09412066",
"keywords": [
"Computer Vision",
"Eye",
"Gaze Tracking",
"Learning Artificial Intelligence",
"Light Emitting Diodes",
"Light Sources",
"Neural Nets",
"Object Detection",
"Optical Tracking",
"Virtual Reality",
"Corneal Reflections",
"Eye Tracking Systems",
"Point Of Gaze",
"Extended Reality Systems",
"Accurate Eye Tracking Techniques",
"Eye Tracker",
"Multiple Light Sources",
"Corresponding Light Source",
"Expected Eye Movements",
"XR Systems",
"Binocular Eye Tracking Module",
"Infrared Light Sources",
"4,000 Eye Images",
"State Of The Art Deep Learning System",
"Headphones",
"Head",
"Tracking",
"Memory Management",
"Lighting",
"Gaze Tracking",
"Reflection",
"Eye Tracking",
"Corneal Reflections",
"Semantic Segmentation",
"Virtual Reality",
"Pattern Recognition"
],
"authors": [
{
"affiliation": "University of Toronto,Department of Electrical and Computer Engineering,Toronto,Canada",
"fullName": "Soumil Chugh",
"givenName": "Soumil",
"surname": "Chugh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toronto,Department of Electrical and Computer Engineering,Toronto,Canada",
"fullName": "Braiden Brousseau",
"givenName": "Braiden",
"surname": "Brousseau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toronto,Department of Electrical and Computer Engineering,Toronto,Canada",
"fullName": "Jonathan Rose",
"givenName": "Jonathan",
"surname": "Rose",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ophthalmology and Vision Sciences, University of Toronto,Department of Electrical and Computer Engineering,Toronto,Canada",
"fullName": "Moshe Eizenman",
"givenName": "Moshe",
"surname": "Eizenman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "2210-2217",
"year": "2021",
"issn": "1051-4651",
"isbn": "978-1-7281-8808-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09412258",
"articleId": "1tmiTV5IMb6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09412986",
"articleId": "1tmjn8Z0OFW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/gcis/2009/3571/2/3571b133",
"title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571b133/12OmNA0vo1q",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/2",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2015/8873/0/8873a431",
"title": "Precise Exposure Control for Efficient Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2015/8873a431/12OmNARiM0m",
"parentPublication": {
"id": "proceedings/imis/2015/8873/0",
"title": "2015 9th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926684",
"title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926684/12OmNvlxJrb",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169846",
"title": "Non-calibrated and real-time human view estimation using a mobile corneal imaging camera",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169846/12OmNxu6pai",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020101",
"title": "Non-Contact Eye Gaze Tracking System by Mapping of Corneal Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020101/12OmNzgwmIY",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d937",
"title": "Event-Based Kilohertz Eye Tracking using Coded Differential Lighting",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d937/1B13uiL4IUM",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a375",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a375/1JrQRCijhMk",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090544",
"title": "Removal of the Infrared Light Reflection of Eyeglass Using Multi-Channel CycleGAN Applied for the Gaze Estimation Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090544/1jIxvetbThe",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2020/9899/0/09225144",
"title": "Exploiting EEG Signals for Eye Motion Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2020/09225144/1nWNWOWhzj2",
"parentPublication": {
"id": "proceedings/ewdts/2020/9899/0",
"title": "2020 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCmpcMP",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"acronym": "iccp",
"groupId": "1800125",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAFWOQu",
"doi": "10.1109/ICCPHOT.2011.5753120",
"title": "Modeling and removing spatially-varying optical blur",
"normalizedTitle": "Modeling and removing spatially-varying optical blur",
"abstract": "Photo deblurring has been a major research topic in the past few years. So far, existing methods have focused on removing the blur due to camera shake and object motion. In this paper, we show that the optical system of the camera also generates significant blur, even with professional lenses. We introduce a method to estimate the blur kernel densely over the image and across multiple aperture and zoom settings. Our measures show that the blur kernel can have a non-negligible spread, even with top-of-the-line equipment, and that it varies nontrivially over this domain. In particular, the spatial variations are not radially symmetric and not even left-right symmetric. We develop and compare two models of the optical blur, each of them having its own advantages. We show that our models predict accurate blur kernels that can be used to restore photos. We demonstrate that we can produce images that are more uniformly sharp unlike those produced with spatially-invariant deblurring techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Photo deblurring has been a major research topic in the past few years. So far, existing methods have focused on removing the blur due to camera shake and object motion. In this paper, we show that the optical system of the camera also generates significant blur, even with professional lenses. We introduce a method to estimate the blur kernel densely over the image and across multiple aperture and zoom settings. Our measures show that the blur kernel can have a non-negligible spread, even with top-of-the-line equipment, and that it varies nontrivially over this domain. In particular, the spatial variations are not radially symmetric and not even left-right symmetric. We develop and compare two models of the optical blur, each of them having its own advantages. We show that our models predict accurate blur kernels that can be used to restore photos. We demonstrate that we can produce images that are more uniformly sharp unlike those produced with spatially-invariant deblurring techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Photo deblurring has been a major research topic in the past few years. So far, existing methods have focused on removing the blur due to camera shake and object motion. In this paper, we show that the optical system of the camera also generates significant blur, even with professional lenses. We introduce a method to estimate the blur kernel densely over the image and across multiple aperture and zoom settings. Our measures show that the blur kernel can have a non-negligible spread, even with top-of-the-line equipment, and that it varies nontrivially over this domain. In particular, the spatial variations are not radially symmetric and not even left-right symmetric. We develop and compare two models of the optical blur, each of them having its own advantages. We show that our models predict accurate blur kernels that can be used to restore photos. We demonstrate that we can produce images that are more uniformly sharp unlike those produced with spatially-invariant deblurring techniques.",
"fno": "05753120",
"keywords": [
"Image Restoration",
"Motion Estimation",
"Photographic Lenses",
"Spatially Varying Optical Blur",
"Photo Deblurring",
"Camera Shake",
"Object Motion",
"Optical System",
"Professional Lenses",
"Blur Kernel",
"Multiple Aperture",
"Zoom Settings",
"Kernel",
"Optical Imaging",
"Lenses",
"Apertures",
"Adaptive Optics",
"Robustness",
"Optical Sensors"
],
"authors": [
{
"affiliation": "Dartmouth College, USA",
"fullName": "Eric Kee",
"givenName": "Eric",
"surname": "Kee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Systems, Inc., USA",
"fullName": "Sylvain Paris",
"givenName": "Sylvain",
"surname": "Paris",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Systems, Inc., USA",
"fullName": "Simon Chen",
"givenName": "Simon",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Systems, Inc., USA",
"fullName": "Jue Wang",
"givenName": null,
"surname": "Jue Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2011",
"issn": null,
"isbn": "978-1-61284-707-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05753119",
"articleId": "12OmNz6ApbH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05753121",
"articleId": "12OmNAXPxZW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504749",
"title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504749/12OmNBBhN9g",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460049",
"title": "SharpView: Improved clarity of defocused content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460049/12OmNBWzHQi",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460594",
"title": "Deblurring depth blur and motion blur simultaneously by using space-time coding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460594/12OmNBp52IS",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413542",
"title": "Monocular depth perception by evaluation of the blur in defocused images",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413542/12OmNx0RIU6",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05539954",
"title": "Analyzing spatially-varying blur",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05539954/12OmNxGj9N7",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a612",
"title": "Self-Calibration of Optical Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a612/12OmNyQ7FPm",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/221P2B20",
"title": "Optical flow in the presence of spatially-varying motion blur",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/221P2B20/12OmNyQYtvR",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/10/08063973",
"title": "Dynamic Video Deblurring Using a Locally Adaptive Blur Model",
"doi": null,
"abstractUrl": "/journal/tp/2018/10/08063973/13rRUyeCkbD",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09151057",
"title": "VDFlow: Joint Learning for Optical Flow and Video Deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09151057/1lPH4TqYnlK",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d552",
"title": "Efficient Dynamic Scene Deblurring Using Spatially Variant Deconvolution Network With Optical Flow Guided Training",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d552/1m3nGOcNifu",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNs4S8wz",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"acronym": "iri",
"groupId": "1001046",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvq5jzp",
"doi": "10.1109/IRI.2014.7051914",
"title": "Towards ray optics formalization of optical imaging systems",
"normalizedTitle": "Towards ray optics formalization of optical imaging systems",
"abstract": "The verification of optical systems is an important issue due to their safety and financial critical nature (e.g., laser surgeries and space telescopes). Theorem proving offers an attractive solution to overcome the accuracy and soundness problems of traditional approaches like paper-and-pencil based proofs and computer simulation. However, existing formalizations of optics theories do not provide the facility to analyze optical imaging systems which describe the behavior of light ray within the system. In this paper, we present the ray optics formalization of cardinal points which are the most fundamental requirement to model imaging properties of optical systems. We also present the verification of cardinal points for a general system consisting of any number of optical components. For illustration purposes, we present the formal analysis of a thick lens.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The verification of optical systems is an important issue due to their safety and financial critical nature (e.g., laser surgeries and space telescopes). Theorem proving offers an attractive solution to overcome the accuracy and soundness problems of traditional approaches like paper-and-pencil based proofs and computer simulation. However, existing formalizations of optics theories do not provide the facility to analyze optical imaging systems which describe the behavior of light ray within the system. In this paper, we present the ray optics formalization of cardinal points which are the most fundamental requirement to model imaging properties of optical systems. We also present the verification of cardinal points for a general system consisting of any number of optical components. For illustration purposes, we present the formal analysis of a thick lens.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The verification of optical systems is an important issue due to their safety and financial critical nature (e.g., laser surgeries and space telescopes). Theorem proving offers an attractive solution to overcome the accuracy and soundness problems of traditional approaches like paper-and-pencil based proofs and computer simulation. However, existing formalizations of optics theories do not provide the facility to analyze optical imaging systems which describe the behavior of light ray within the system. In this paper, we present the ray optics formalization of cardinal points which are the most fundamental requirement to model imaging properties of optical systems. We also present the verification of cardinal points for a general system consisting of any number of optical components. For illustration purposes, we present the formal analysis of a thick lens.",
"fno": "07051914",
"keywords": [
"Optical Imaging",
"Optical Refraction",
"Optical Variables Control",
"Adaptive Optics",
"Optical Resonators"
],
"authors": [
{
"affiliation": "Department of Electrical and Computer Engineering, Concordia University, Montreal, Canada",
"fullName": "Umair Siddique",
"givenName": "Umair",
"surname": "Siddique",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical and Computer Engineering, Concordia University, Montreal, Canada",
"fullName": "Sofiene Tahar",
"givenName": "Sofiene",
"surname": "Tahar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iri",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "378-385",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-5880-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07051913",
"articleId": "12OmNxR5UON",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07051915",
"articleId": "12OmNyen1jY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ca/2014/8205/0/07026258",
"title": "The Dispersion Coefficient of Air Refractive Index Measurement System Based on CCD Imaging Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2014/07026258/12OmNAWpymQ",
"parentPublication": {
"id": "proceedings/ca/2014/8205/0",
"title": "2014 7th Conference on Control and Automation (CA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457553",
"title": "Krill-eye : Superposition compound eye for wide-angle imaging via GRIN lenses",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457553/12OmNAtK4gM",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bia/1994/5802/0/00315858",
"title": "A scalar function formulation for optical flow: applications to X-ray imaging",
"doi": null,
"abstractUrl": "/proceedings-article/bia/1994/00315858/12OmNrH1PDl",
"parentPublication": {
"id": "proceedings/bia/1994/5802/0",
"title": "Proceedings of IEEE Workshop on Biomedical Image Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hoti/2008/3380/0/3380a131",
"title": "Telecentric Optics for Free-Space Optical Link",
"doi": null,
"abstractUrl": "/proceedings-article/hoti/2008/3380a131/12OmNvkplaS",
"parentPublication": {
"id": "proceedings/hoti/2008/3380/0",
"title": "2008 16th IEEE Symposium on High Performance Interconnects",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2016/8623/0/07492865",
"title": "Keynote speakers: Computational imaging: How much imaging — How much computation?",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2016/07492865/12OmNx0RIPZ",
"parentPublication": {
"id": "proceedings/iccp/2016/8623/0",
"title": "2016 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/memsys/1997/3744/0/00581761",
"title": "Near field optics for nanometric sensing and control",
"doi": null,
"abstractUrl": "/proceedings-article/memsys/1997/00581761/12OmNxw5B9m",
"parentPublication": {
"id": "proceedings/memsys/1997/3744/0",
"title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2014/4173/0/4173a371",
"title": "Optical Ray Tracing Based on Dijkstra Algorithm in Inhomogeneous Medium",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2014/4173a371/12OmNzXFozK",
"parentPublication": {
"id": "proceedings/bwcca/2014/4173/0",
"title": "2014 Ninth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vtsa/1991/0036/0/00246720",
"title": "Quarter- and sub-quarter-micron optical lithography",
"doi": null,
"abstractUrl": "/proceedings-article/vtsa/1991/00246720/12OmNzwHvst",
"parentPublication": {
"id": "proceedings/vtsa/1991/0036/0",
"title": "1991 International Symposium on VLSI Technology, Systems, and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07383324",
"title": "Effects of Configuration of Optical Combiner on Near-Field Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07383324/13rRUwI5Ugg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9748",
"title": "Quantization-aware Deep Optics for Diffractive Snapshot Hyperspectral Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9748/1H0NBTZAs48",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cI6ar8DdyE",
"doi": "10.1109/VR.2019.8798245",
"title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"normalizedTitle": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"abstract": "Shadowless Projector is projection mapping system in which a shadow (more specifically, umbra) does not suffer the projected result. A typical shadow removal technique used a multiple overlapping projection system. In this paper, we propose a shadow-less projection method with single projector. Inspired by a surgical light system that does not cast shadows on patients' bodies in clinical practice, we apply a special optical system that consists of methodically positioned vertical mirrors. This optical system works as a large aperture lens, it is impossible to block all projected ray by a small object such as a hand. Consequently, only penumbra is caused, which leads to a shadow-less projection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Shadowless Projector is projection mapping system in which a shadow (more specifically, umbra) does not suffer the projected result. A typical shadow removal technique used a multiple overlapping projection system. In this paper, we propose a shadow-less projection method with single projector. Inspired by a surgical light system that does not cast shadows on patients' bodies in clinical practice, we apply a special optical system that consists of methodically positioned vertical mirrors. This optical system works as a large aperture lens, it is impossible to block all projected ray by a small object such as a hand. Consequently, only penumbra is caused, which leads to a shadow-less projection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Shadowless Projector is projection mapping system in which a shadow (more specifically, umbra) does not suffer the projected result. A typical shadow removal technique used a multiple overlapping projection system. In this paper, we propose a shadow-less projection method with single projector. Inspired by a surgical light system that does not cast shadows on patients' bodies in clinical practice, we apply a special optical system that consists of methodically positioned vertical mirrors. This optical system works as a large aperture lens, it is impossible to block all projected ray by a small object such as a hand. Consequently, only penumbra is caused, which leads to a shadow-less projection.",
"fno": "08798245",
"keywords": [
"Image Processing",
"Lighting",
"Microlenses",
"Micromirrors",
"Optical Arrays",
"Optical Projectors",
"Surgery",
"Large Aperture Lens",
"Shadow Removal Technique",
"Vertical Mirrors",
"Shadow Less Projection Method",
"Multiple Overlapping Projection System",
"Projection Mapping System",
"Micromirror Array Plate",
"Shadowless Projector",
"Projected Ray",
"Special Optical System",
"Surgical Light System",
"Single Projector",
"Mirrors",
"Apertures",
"Lenses",
"Cameras",
"Entertainment Industry",
"Computer Vision",
"Light Sources",
"Projection Mapping",
"Micro Mirror Array Plate",
"Interactive System",
"Spatial Augmented Reality"
],
"authors": [
{
"affiliation": "Osaka University, Suita, Japan",
"fullName": "Kosuke Hiratani",
"givenName": "Kosuke",
"surname": "Hiratani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University, Suita, Japan",
"fullName": "Daisuke Iwai",
"givenName": "Daisuke",
"surname": "Iwai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University, Suita, Japan",
"fullName": "Parinya Punpongsanon",
"givenName": "Parinya",
"surname": "Punpongsanon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University, Suita, Japan",
"fullName": "Kosuke Sato",
"givenName": "Kosuke",
"surname": "Sato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1309-1310",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797825",
"articleId": "1cJ1g65n5iU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797771",
"articleId": "1cJ0HVzBMre",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460424",
"title": "Coded aperture for projector and camera for robust 3D measurement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460424/12OmNBpVQ2Y",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206614",
"title": "A projector-camera setup for geometry-invariant frequency demultiplexing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206614/12OmNvoWV1H",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a359",
"title": "A Method of Touching and Moving Virtual Shadows with Real Shadows",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a359/12OmNwpGgGH",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270475",
"title": "Projector Calibration using Arbitrary Planes and Calibrated Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270475/12OmNxYtu7r",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d568",
"title": "Active One-Shot Scan for Wide Depth Range Using a Light Field Projector Based on Coded Aperture",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d568/12OmNxdm4Cp",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543466",
"title": "Interactive display of image details using a camera-coupled mobile projector",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543466/12OmNzdoMtZ",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07014259",
"title": "Extended Depth-of-Field Projector by Fast Focal Sweep Projection",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07014259/13rRUxAASVV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09930626",
"title": "A Monocular Projector-Camera System using Modular Architecture",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09930626/1HMOYkaK9Ww",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysiY6ymKQ",
"doi": "10.1109/ISMAR-Adjunct.2019.00-37",
"title": "Compact Light Field Augmented Reality Display with Eliminated Stray Light using Discrete Structures",
"normalizedTitle": "Compact Light Field Augmented Reality Display with Eliminated Stray Light using Discrete Structures",
"abstract": "This paper discusses the design of a wearable display in the form of compact eyeglasses, supporting a fair field of view, correct focus cue, and optical see-through capacity. Based on integral imaging, our proposal comprises a discrete transparent microdisplay array as the image source and a discrete lenslet array as the spatial light modulator, without the need for a pre-imaging system or special prism. We designed an annular aperture array to eliminate stray light, controlled within an imperceptible limit. Through a stray light simulation and an imaging simulation, the system was proved to provide a good image quality for both virtual and real information.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper discusses the design of a wearable display in the form of compact eyeglasses, supporting a fair field of view, correct focus cue, and optical see-through capacity. Based on integral imaging, our proposal comprises a discrete transparent microdisplay array as the image source and a discrete lenslet array as the spatial light modulator, without the need for a pre-imaging system or special prism. We designed an annular aperture array to eliminate stray light, controlled within an imperceptible limit. Through a stray light simulation and an imaging simulation, the system was proved to provide a good image quality for both virtual and real information.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper discusses the design of a wearable display in the form of compact eyeglasses, supporting a fair field of view, correct focus cue, and optical see-through capacity. Based on integral imaging, our proposal comprises a discrete transparent microdisplay array as the image source and a discrete lenslet array as the spatial light modulator, without the need for a pre-imaging system or special prism. We designed an annular aperture array to eliminate stray light, controlled within an imperceptible limit. Through a stray light simulation and an imaging simulation, the system was proved to provide a good image quality for both virtual and real information.",
"fno": "476500a245",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Lenses",
"Microdisplays",
"Optical Arrays",
"Optical Design Techniques",
"Spatial Light Modulators",
"Stray Light",
"Three Dimensional Displays",
"Compact Light Field Augmented Reality Display",
"Eliminated Stray Light",
"Discrete Structures",
"Wearable Display",
"Compact Eyeglasses",
"Correct Focus Cue",
"Integral Imaging",
"Discrete Transparent Microdisplay Array",
"Image Source",
"Discrete Lenslet Array",
"Spatial Light Modulator",
"Pre Imaging System",
"Annular Aperture Array",
"Stray Light Simulation",
"Imaging Simulation",
"Apertures",
"Stray Light",
"Microdisplays",
"Optical Imaging",
"Adaptive Optics",
"Cameras",
"Augmented Reality Light Field Display Micro LED Display"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Cheng Yao",
"givenName": "Cheng",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Yue Liu",
"givenName": "Yue",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Dewen Cheng",
"givenName": "Dewen",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Yongtian Wang",
"givenName": "Yongtian",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "245-250",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a243",
"articleId": "1gysnyqF7Fu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a251",
"articleId": "1gysnhYMnjG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2011/707/0/05753123",
"title": "Hand-held Schlieren Photography with Light Field probes",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2011/05753123/12OmNAqkSGI",
"parentPublication": {
"id": "proceedings/iccp/2011/707/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc-scalcom/2015/7211/0/07518340",
"title": "Performance Improvement of Visible Light Communication System Using Reed-Solomon Code",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom/2015/07518340/12OmNqOOrJx",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom/2015/7211/0",
"title": "2015 IEEE 12th Intl Conf on Ubiquitous Intelligence and Computing and 2015 IEEE 12th Intl Conf on Autonomic and Trusted Computing and 2015 IEEE 15th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460973",
"title": "Position estimation of near point light sources using a clear hollow sphere",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460973/12OmNrJRPkJ",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460391",
"title": "Direct imaging with printed microlens arrays",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460391/12OmNs0TL48",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049693",
"title": "Shadowless Projection Mapping using Retrotransmissive Optics",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049693/1KYonRpS9fW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iucc-cit-dsci-smartcns/2022/7726/0/772600a319",
"title": "Underwater LED light MIMO telecentric optical path technology research",
"doi": null,
"abstractUrl": "/proceedings-article/iucc-cit-dsci-smartcns/2022/772600a319/1M4rfySedVe",
"parentPublication": {
"id": "proceedings/iucc-cit-dsci-smartcns/2022/7726/0",
"title": "2022 IEEE 21st International Conference on Ubiquitous Computing and Communications (IUCC/CIT/DSCI/SmartCNS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797796",
"title": "Full Parallax Table Top 3D Display Using Visually Equivalent Light Field",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797796/1cJ1cj63M3u",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2018/6956/0/695600a192",
"title": "Chromatic Aberration Analysis and Correction of Fore-Optics of a Light Field Imaging Spectrometer",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2018/695600a192/1e5ZoAZMvhm",
"parentPublication": {
"id": "proceedings/icnisc/2018/6956/0",
"title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199567",
"title": "StainedView: Variable-Intensity Light-Attenuation Display with Cascaded Spatial Color Filtering for Improved Color Fidelity",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199567/1ncgpOWQBig",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icispc/2021/2425/0/242500a067",
"title": "Design of Ghost-free Aerial Display by Using Polarizing Plate and Dihedral Corner Reflector Array",
"doi": null,
"abstractUrl": "/proceedings-article/icispc/2021/242500a067/1zw6m3vShGM",
"parentPublication": {
"id": "proceedings/icispc/2021/2425/0",
"title": "2021 5th International Conference on Imaging, Signal Processing and Communications (ICISPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeJIZzYCGc",
"doi": "10.1109/CVPR46437.2021.00900",
"title": "Mask-ToF: Learning Microlens Masks for Flying Pixel Correction in Time-of-Flight Imaging",
"normalizedTitle": "Mask-ToF: Learning Microlens Masks for Flying Pixel Correction in Time-of-Flight Imaging",
"abstract": "We introduce Mask-ToF, a method to reduce flying pixels (FP) in time-of-flight (ToF) depth captures. FPs are pervasive artifacts which occur around depth edges, where light paths from both an object and its background are integrated over the aperture. This light mixes at a sensor pixel to produce erroneous depth estimates, which can adversely affect downstream 3D vision tasks. Mask-ToF starts at the source of these FPs, learning a microlens-level occlusion mask which effectively creates a custom-shaped sub-aperture for each sensor pixel. This modulates the selection of foreground and background light mixtures on a per-pixel basis and thereby encodes scene geometric information directly into the ToF measurements. We develop a differentiable ToF simulator to jointly train a convolutional neural network to decode this information and produce high-fidelity, low-FP depth reconstructions. We test the effectiveness of Mask-ToF on a simulated light field dataset and validate the method with an experimental prototype. To this end, we manufacture the learned amplitude mask and design an optical relay system to virtually place it on a high-resolution ToF sensor. We find that Mask-ToF generalizes well to real data without retraining, cutting FP counts in half.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce Mask-ToF, a method to reduce flying pixels (FP) in time-of-flight (ToF) depth captures. FPs are pervasive artifacts which occur around depth edges, where light paths from both an object and its background are integrated over the aperture. This light mixes at a sensor pixel to produce erroneous depth estimates, which can adversely affect downstream 3D vision tasks. Mask-ToF starts at the source of these FPs, learning a microlens-level occlusion mask which effectively creates a custom-shaped sub-aperture for each sensor pixel. This modulates the selection of foreground and background light mixtures on a per-pixel basis and thereby encodes scene geometric information directly into the ToF measurements. We develop a differentiable ToF simulator to jointly train a convolutional neural network to decode this information and produce high-fidelity, low-FP depth reconstructions. We test the effectiveness of Mask-ToF on a simulated light field dataset and validate the method with an experimental prototype. To this end, we manufacture the learned amplitude mask and design an optical relay system to virtually place it on a high-resolution ToF sensor. We find that Mask-ToF generalizes well to real data without retraining, cutting FP counts in half.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce Mask-ToF, a method to reduce flying pixels (FP) in time-of-flight (ToF) depth captures. FPs are pervasive artifacts which occur around depth edges, where light paths from both an object and its background are integrated over the aperture. This light mixes at a sensor pixel to produce erroneous depth estimates, which can adversely affect downstream 3D vision tasks. Mask-ToF starts at the source of these FPs, learning a microlens-level occlusion mask which effectively creates a custom-shaped sub-aperture for each sensor pixel. This modulates the selection of foreground and background light mixtures on a per-pixel basis and thereby encodes scene geometric information directly into the ToF measurements. We develop a differentiable ToF simulator to jointly train a convolutional neural network to decode this information and produce high-fidelity, low-FP depth reconstructions. We test the effectiveness of Mask-ToF on a simulated light field dataset and validate the method with an experimental prototype. To this end, we manufacture the learned amplitude mask and design an optical relay system to virtually place it on a high-resolution ToF sensor. We find that Mask-ToF generalizes well to real data without retraining, cutting FP counts in half.",
"fno": "450900j112",
"keywords": [
"Computer Vision",
"Feature Extraction",
"Image Reconstruction",
"Image Representation",
"Image Segmentation",
"Image Sensors",
"Learning Artificial Intelligence",
"Neural Nets",
"Object Detection",
"Object Tracking",
"Sensors",
"Stereo Image Processing",
"Microlens Masks",
"Flying Pixel Correction",
"Time Of Flight Imaging",
"Time Of Flight Depth Captures",
"Sensor Pixel",
"Erroneous Depth Estimates",
"Mask To F Starts",
"Microlens Level Occlusion Mask",
"Per Pixel Basis",
"To F Measurements",
"Differentiable To F Simulator",
"Low FP Depth Reconstructions",
"Learned Amplitude Mask",
"High Resolution To F Sensor",
"Three Dimensional Displays",
"Optical Imaging",
"Cameras",
"Throughput",
"Adaptive Optics",
"Optical Sensors",
"Relays"
],
"authors": [
{
"affiliation": "Princeton University",
"fullName": "Ilya Chugunov",
"givenName": "Ilya",
"surname": "Chugunov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Princeton University",
"fullName": "Seung-Hwan Baek",
"givenName": "Seung-Hwan",
"surname": "Baek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "King Abdullah University of Science and Technology",
"fullName": "Qiang Fu",
"givenName": "Qiang",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "King Abdullah University of Science and Technology",
"fullName": "Wolfgang Heidrich",
"givenName": "Wolfgang",
"surname": "Heidrich",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Princeton University",
"fullName": "Felix Heide",
"givenName": "Felix",
"surname": "Heide",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9112-9122",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeJIQUVyA8",
"name": "pcvpr202145090-09578501s1-mm_450900j112.zip",
"size": "19.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578501s1-mm_450900j112.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900j101",
"articleId": "1yeKTkQzhdK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900j123",
"articleId": "1yeIijSbnPy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2015/8332/0/8332a001",
"title": "Depth Fields: Extending Light Field Techniques to Time-of-Flight Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a001/12OmNC943Ci",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07299062",
"title": "Simultaneous Time-of-Flight sensing and photometric stereo with a single ToF sensor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07299062/12OmNvTBB3m",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2018/2526/0/08368473",
"title": "SH-ToF: Micro resolution time-of-flight imaging with superheterodyne interferometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368473/12OmNzBwGEy",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771333",
"title": "Estimating human 3D pose from Time-of-Flight images based on geodesic distances and optical flow",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771333/12OmNzgNXR0",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2017/03/07725551",
"title": "A Fast Process-Variation-Aware Mask Optimization Algorithm With a Novel Intensity Modeling",
"doi": null,
"abstractUrl": "/journal/si/2017/03/07725551/13rRUwgyOe7",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049693",
"title": "Shadowless Projection Mapping using Retrotransmissive Optics",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049693/1KYonRpS9fW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08827571",
"title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300j993",
"title": "Deep End-to-End Alignment and Refinement for Time-of-Flight RGB-D Module",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300j993/1hVlMAmNFTO",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a820",
"title": "Restoration of Motion Blur in Time-of-Flight Depth Image Using Data Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a820/1qyxiQKWmti",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvqW6W4",
"title": "2016 IEEE Third VR International Workshop on Collaborative Virtual Environments (3DCVE)",
"acronym": "3dcve",
"groupId": "1807844",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxG1yHz",
"doi": "10.1109/3DCVE.2016.7563559",
"title": "Vishnu: virtual immersive support for HelpiNg users an interaction paradigm for collaborative remote guiding in mixed reality",
"normalizedTitle": "Vishnu: virtual immersive support for HelpiNg users an interaction paradigm for collaborative remote guiding in mixed reality",
"abstract": "Increasing networking performances as well as the emergence of Mixed Reality (MR) technologies make possible providing advanced interfaces to improve remote collaboration. In this paper, we present our novel interaction paradigm called Vishnu that aims to ease collaborative remote guiding. We focus on collaborative remote maintenance as an illustrative use case. It relies on an expert immersed in Virtual Reality (VR) in the remote workspace of a local agent helped through an Augmented Reality (AR) interface. The main idea of the Vishnu paradigm is to provide the local agent with two additional virtual arms controlled by the remote expert who can use them as interactive guidance tools. Many challenges come with this: collocation, inverse kinematics (IK), the perception of the remote collaborator and gestures coordination. Vishnu aims to enhance the maintenance procedure thanks to a remote expert who can show to the local agent the exact gestures and actions to perform. Our pilot user study shows that it may decrease the cognitive load compared to a usual approach based on the mapping of 2D and de-localized informations, and it could be used by agents in order to perform specific procedures without needing to have an available local expert.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Increasing networking performances as well as the emergence of Mixed Reality (MR) technologies make possible providing advanced interfaces to improve remote collaboration. In this paper, we present our novel interaction paradigm called Vishnu that aims to ease collaborative remote guiding. We focus on collaborative remote maintenance as an illustrative use case. It relies on an expert immersed in Virtual Reality (VR) in the remote workspace of a local agent helped through an Augmented Reality (AR) interface. The main idea of the Vishnu paradigm is to provide the local agent with two additional virtual arms controlled by the remote expert who can use them as interactive guidance tools. Many challenges come with this: collocation, inverse kinematics (IK), the perception of the remote collaborator and gestures coordination. Vishnu aims to enhance the maintenance procedure thanks to a remote expert who can show to the local agent the exact gestures and actions to perform. Our pilot user study shows that it may decrease the cognitive load compared to a usual approach based on the mapping of 2D and de-localized informations, and it could be used by agents in order to perform specific procedures without needing to have an available local expert.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Increasing networking performances as well as the emergence of Mixed Reality (MR) technologies make possible providing advanced interfaces to improve remote collaboration. In this paper, we present our novel interaction paradigm called Vishnu that aims to ease collaborative remote guiding. We focus on collaborative remote maintenance as an illustrative use case. It relies on an expert immersed in Virtual Reality (VR) in the remote workspace of a local agent helped through an Augmented Reality (AR) interface. The main idea of the Vishnu paradigm is to provide the local agent with two additional virtual arms controlled by the remote expert who can use them as interactive guidance tools. Many challenges come with this: collocation, inverse kinematics (IK), the perception of the remote collaborator and gestures coordination. Vishnu aims to enhance the maintenance procedure thanks to a remote expert who can show to the local agent the exact gestures and actions to perform. Our pilot user study shows that it may decrease the cognitive load compared to a usual approach based on the mapping of 2D and de-localized informations, and it could be used by agents in order to perform specific procedures without needing to have an available local expert.",
"fno": "07563559",
"keywords": [
"Three Dimensional Displays",
"Virtual Reality",
"Collaboration",
"Cameras",
"Layout",
"Maintenance Engineering",
"Navigation",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Tech",
"H 5 1 Information Interfaces And Presentation E G HCI Multimedia Information Systems Artificial Augmented And Virtual Realities",
"H 5 2 Information Interfaces And Presentation E G HCI User Interfaces Prototyping"
],
"authors": [
{
"affiliation": null,
"fullName": "Morgan Le Chenechal",
"givenName": "Morgan Le",
"surname": "Chenechal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Telecom Bretagne, Lab-STICC",
"fullName": "Thierry Duval",
"givenName": "Thierry",
"surname": "Duval",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INSA Rennes, Irisa/INRIA",
"fullName": "Valerie Gouranton",
"givenName": "Valerie",
"surname": "Gouranton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jerome Royan",
"givenName": "Jerome",
"surname": "Royan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INSA Rennes, Irisa/INRIA",
"fullName": "Bruno Arnaldi",
"givenName": "Bruno",
"surname": "Arnaldi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dcve",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "9-12",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2138-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07563558",
"articleId": "12OmNwE9OSy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07563560",
"articleId": "12OmNwGIcCf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550237",
"title": "Poster: 3D referencing for remote task assistance in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550237/12OmNqC2uWf",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671839",
"title": "Towards object based manipulation in remote guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671839/12OmNxd4tri",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2012/4651/0/4651a663",
"title": "Instruction for Remote MR Cooperative Work with Captured Still Worker's View Video",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2012/4651a663/12OmNxdDFSs",
"parentPublication": {
"id": "proceedings/aina/2012/4651/0",
"title": "2012 IEEE 26th International Conference on Advanced Information Networking and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699260",
"title": "Comparing Different Augmented Reality Support Applications for Cooperative Repair of an Industrial Robot",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699260/19F1M8A6RHO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699225",
"title": "Augmented Reality Remote Collaboration with Dense Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699225/19F1OvIhORa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a346",
"title": "Mixed Reality Communication for Medical Procedures: Teaching the Placement of a Central Venous Catheter",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a346/1JrR1uZty2k",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a091",
"title": "An MR Remote Collaborative Platform Based on 3D CAD Models for Training in Industry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a091/1gysneD006s",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089500",
"title": "A User Study on View-sharing Techniques for One-to-Many Mixed Reality Collaborations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089500/1jIxeQ9QEVy",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a653",
"title": "Remote Assistance with Mixed Reality for Procedural Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a653/1tnXsEDjkKQ",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a532",
"title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJelpv0Txm",
"doi": "10.1109/VRW55335.2022.00321",
"title": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration",
"normalizedTitle": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration",
"abstract": "Mixed reality (MR) approaches offer merging of real and virtual worlds to create new environments and visualizations for real-time interaction. Existing MR systems, however, do not utilise user real environment, lack detail in dynamic environments, and often lack multi-user capabilities. This research focuses on exploring multiuser aspects of immersive collaboration, where an arbitrary number of co-located and remotely located users can collaborate in a single or merged collaborative MR space. The aim is to enable users to experience VR/AR together, irrespective of the type of HMD, and facilitate users with their collaborative tasks. The main goal is to develop an immersive collaboration platform in which users can utilize the space around them and at the same time collaborate and switch between different perspectives of other co-located and remote users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mixed reality (MR) approaches offer merging of real and virtual worlds to create new environments and visualizations for real-time interaction. Existing MR systems, however, do not utilise user real environment, lack detail in dynamic environments, and often lack multi-user capabilities. This research focuses on exploring multiuser aspects of immersive collaboration, where an arbitrary number of co-located and remotely located users can collaborate in a single or merged collaborative MR space. The aim is to enable users to experience VR/AR together, irrespective of the type of HMD, and facilitate users with their collaborative tasks. The main goal is to develop an immersive collaboration platform in which users can utilize the space around them and at the same time collaborate and switch between different perspectives of other co-located and remote users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mixed reality (MR) approaches offer merging of real and virtual worlds to create new environments and visualizations for real-time interaction. Existing MR systems, however, do not utilise user real environment, lack detail in dynamic environments, and often lack multi-user capabilities. This research focuses on exploring multiuser aspects of immersive collaboration, where an arbitrary number of co-located and remotely located users can collaborate in a single or merged collaborative MR space. The aim is to enable users to experience VR/AR together, irrespective of the type of HMD, and facilitate users with their collaborative tasks. The main goal is to develop an immersive collaboration platform in which users can utilize the space around them and at the same time collaborate and switch between different perspectives of other co-located and remote users.",
"fno": "840200a940",
"keywords": [
"Data Visualisation",
"Groupware",
"Helmet Mounted Displays",
"Virtual Reality",
"DC Improving Multiuser Interaction",
"Mixed Reality Telecollaboration",
"Mixed Reality Approaches",
"Virtual Worlds",
"Real Time Interaction",
"User Real Environment",
"Dynamic Environments",
"Lack Multiuser Capabilities",
"Multiuser Aspects",
"Arbitrary Number",
"Collaborative Tasks",
"Immersive Collaboration Platform",
"Time Collaborate",
"Remote Users",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Collaboration",
"Mixed Reality",
"Virtual Reality",
"Switches",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Computational Media Innovation Centre, Victoria University of Wellington",
"fullName": "Faisal Zaman",
"givenName": "Faisal",
"surname": "Zaman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "940-941",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a938",
"articleId": "1CJd1lHH4D6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a942",
"articleId": "1CJdXb7ABC8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isar/2001/1375/0/13750169",
"title": "MR2 (MR Square): A Mixed-Reality Meeting Room",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750169/12OmNzyYibC",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a928",
"title": "[DC] Mixed Reality Interaction for Mobile Knowledge Work",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a928/1CJdRhDCDTO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a615",
"title": "A Shared Interactive Space in Mixed Reality for Collaborative Digital Tower Operations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a615/1J7W9HboPmg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a923",
"title": "Cross Reality Authoring: A Mixed Reality Editor approach",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a923/1J7WtZdBjig",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049704",
"title": "A Survey on Remote Assistance and Training in Mixed Reality Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049704/1KYowCHxUtO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199575",
"title": "Eyes-free Target Acquisition During Walking in Immersive Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199575/1ncgpmtzdn2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a009",
"title": "Egocentric viewpoint in mixed reality situated visualization: challenges and opportunities",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a009/1rSRe0PXIgo",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a532",
"title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a399",
"title": "Detecting and Preventing Faked Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a399/1xPsmX6Ouvm",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a274",
"title": "A Mixed-Reality System to Promote Child Engagement in Remote Intergenerational Storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a274/1yeQMxSyLp6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7W7HJ1DOw",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00023",
"title": "Analyzing the Potential of Remote Collaboration in Industrial Mixed and Virtual Reality Environments",
"normalizedTitle": "Analyzing the Potential of Remote Collaboration in Industrial Mixed and Virtual Reality Environments",
"abstract": "Remote assistance tools in customer support allow for fast response times and reduced service expenses. Video and screen-based applications are widely used in various areas but lack spatial interactivity due to the limitation to a two-dimensional screen. Mixed reality-supported remote collaboration is frequently addressed in research as a way to extend the interaction space. The presented systems often require complicated device setups, calibration, or precomputation for reconstruction of the physical environment. This work investigates a novel approach for location-independent remote assistance and collaboration utilizing state-of-the-art mixed reality devices without any additional hardware modifications. In our approach a local user scans the environment using a HoloLens' standard depth camera. The data is transferred to a remote user equipped with a Virtual Reality headset, where the environment is reconstructed in real-time. The users can interact in the same shared environment. In a user study, the perceived workload was measured for both sides to examine the practical applicability of our approach. The results show that for local users the tasks set resulted in a low mental load, but also indicate that the coarse representation of the environment was mentally demanding for the remote users to work with. With our work we want to make remote collaboration easier and viable for a broader range of applications and facilitate interactions between globally distributed users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Remote assistance tools in customer support allow for fast response times and reduced service expenses. Video and screen-based applications are widely used in various areas but lack spatial interactivity due to the limitation to a two-dimensional screen. Mixed reality-supported remote collaboration is frequently addressed in research as a way to extend the interaction space. The presented systems often require complicated device setups, calibration, or precomputation for reconstruction of the physical environment. This work investigates a novel approach for location-independent remote assistance and collaboration utilizing state-of-the-art mixed reality devices without any additional hardware modifications. In our approach a local user scans the environment using a HoloLens' standard depth camera. The data is transferred to a remote user equipped with a Virtual Reality headset, where the environment is reconstructed in real-time. The users can interact in the same shared environment. In a user study, the perceived workload was measured for both sides to examine the practical applicability of our approach. The results show that for local users the tasks set resulted in a low mental load, but also indicate that the coarse representation of the environment was mentally demanding for the remote users to work with. With our work we want to make remote collaboration easier and viable for a broader range of applications and facilitate interactions between globally distributed users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Remote assistance tools in customer support allow for fast response times and reduced service expenses. Video and screen-based applications are widely used in various areas but lack spatial interactivity due to the limitation to a two-dimensional screen. Mixed reality-supported remote collaboration is frequently addressed in research as a way to extend the interaction space. The presented systems often require complicated device setups, calibration, or precomputation for reconstruction of the physical environment. This work investigates a novel approach for location-independent remote assistance and collaboration utilizing state-of-the-art mixed reality devices without any additional hardware modifications. In our approach a local user scans the environment using a HoloLens' standard depth camera. The data is transferred to a remote user equipped with a Virtual Reality headset, where the environment is reconstructed in real-time. The users can interact in the same shared environment. In a user study, the perceived workload was measured for both sides to examine the practical applicability of our approach. The results show that for local users the tasks set resulted in a low mental load, but also indicate that the coarse representation of the environment was mentally demanding for the remote users to work with. With our work we want to make remote collaboration easier and viable for a broader range of applications and facilitate interactions between globally distributed users.",
"fno": "536500a066",
"keywords": [
"Augmented Reality",
"Customer Services",
"Groupware",
"User Interfaces",
"Complicated Device Setups",
"Customer Support",
"Fast Response Times",
"Holo Lens Standard Depth Camera",
"Industrial Mixed Reality Environments",
"Industrial Virtual Reality Environments",
"Interaction Space",
"Lack Spatial Interactivity",
"Local User",
"Location Independent Remote Assistance",
"Mixed Reality Devices",
"Mixed Reality Supported Remote Collaboration",
"Physical Environment",
"Reduced Service Expenses",
"Remote Assistance Tools",
"Remote User",
"Screen Based Applications",
"Two Dimensional Screen",
"Virtual Reality Headset",
"Headphones",
"Collaboration",
"Mixed Reality",
"Cameras",
"Real Time Systems",
"Hardware",
"Time Factors",
"Mixed Reality",
"Virtual Reality",
"Industry 4 0",
"Remote Collaboration",
"Remote Assistance",
"I 4 5 Image Processing And Computer Vision",
"Reconstruction",
"J 6 Computer Applications",
"Computer Aided Engineering"
],
"authors": [
{
"affiliation": "University of Applied Sciences Upper Austria, School of Business and Management,Steyr,Austria,4400",
"fullName": "Daniel Niedermayr",
"givenName": "Daniel",
"surname": "Niedermayr",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences Upper Austria, School of Business and Management,Steyr,Austria,4400",
"fullName": "Josef Wolfartsberger",
"givenName": "Josef",
"surname": "Wolfartsberger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences Upper Austria, School of Business and Management,Steyr,Austria,4400",
"fullName": "Marijo Borac",
"givenName": "Marijo",
"surname": "Borac",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences Upper Austria, School of Business and Management,Steyr,Austria,4400",
"fullName": "Robert Brandl",
"givenName": "Robert",
"surname": "Brandl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences Upper Austria, School of Business and Management,Steyr,Austria,4400",
"fullName": "Marcel Huber",
"givenName": "Marcel",
"surname": "Huber",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences Upper Austria, School of Business and Management,Steyr,Austria,4400",
"fullName": "Petar Josipovic",
"givenName": "Petar",
"surname": "Josipovic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "66-73",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a060",
"articleId": "1J7WhVnnWpO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a074",
"articleId": "1J7Wfujnyx2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a485",
"title": "Subjective and Objective Analyses of Collaboration and Co-Presence in a Virtual Reality Remote Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a485/1CJcLeKILw4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a940",
"title": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a940/1CJelpv0Txm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a854",
"title": "Jamming in MR: Towards Real-Time Music Collaboration in Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a854/1CJfkgeKhs4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a394",
"title": "The Effects of Device and Spatial Layout on Social Presence During a Dynamic Remote Collaboration Task in Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a394/1JrQSbBWZa0",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a532",
"title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360° Video",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a399",
"title": "Detecting and Preventing Faked Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a399/1xPsmX6Ouvm",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a435",
"title": "Multi-scale Mixed Reality Collaboration for Digital Twin",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a435/1yeQLyb4LpC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a274",
"title": "A Mixed-Reality System to Promote Child Engagement in Remote Intergenerational Storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a274/1yeQMxSyLp6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrQSbBWZa0",
"doi": "10.1109/ISMAR55827.2022.00055",
"title": "The Effects of Device and Spatial Layout on Social Presence During a Dynamic Remote Collaboration Task in Mixed Reality",
"normalizedTitle": "The Effects of Device and Spatial Layout on Social Presence During a Dynamic Remote Collaboration Task in Mixed Reality",
"abstract": "This paper evaluates factors of social presence during a dynamic remote collaboration task in a technologically asymmetric Mixed Reality (MR) setting for two spatial layouts. While active movement during MR remote collaboration is afforded by how the shared 3D space is mediated and configured, studies investigating the impact of these conditions on user experience have been scarce. In a between-group study Z_$(\\mathrm{n}=48)$_Z, a host user in Augmented Reality (AR) and a remote user in Virtual Reality (VR), both wearing Head Mounted Displays (HMDs), simultaneously moved around the shared space to find and assemble parts of a Mars exploration rover together, one group in a Peripheral layout and the other in a Scattered layout with disparate levels of spatial affordance. Results show that while VR facilitates higher co-presence and spatial presence than AR through HMDs, the Peripheral layout enables users to pay more attention to one another than the Scattered. We analyze the results and derive implications aimed at bridging the AR-VR gap in social presence for dynamic MR remote collaboration through the adaptive placement of virtual content in shared spaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper evaluates factors of social presence during a dynamic remote collaboration task in a technologically asymmetric Mixed Reality (MR) setting for two spatial layouts. While active movement during MR remote collaboration is afforded by how the shared 3D space is mediated and configured, studies investigating the impact of these conditions on user experience have been scarce. In a between-group study $(\\mathrm{n}=48)$, a host user in Augmented Reality (AR) and a remote user in Virtual Reality (VR), both wearing Head Mounted Displays (HMDs), simultaneously moved around the shared space to find and assemble parts of a Mars exploration rover together, one group in a Peripheral layout and the other in a Scattered layout with disparate levels of spatial affordance. Results show that while VR facilitates higher co-presence and spatial presence than AR through HMDs, the Peripheral layout enables users to pay more attention to one another than the Scattered. We analyze the results and derive implications aimed at bridging the AR-VR gap in social presence for dynamic MR remote collaboration through the adaptive placement of virtual content in shared spaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper evaluates factors of social presence during a dynamic remote collaboration task in a technologically asymmetric Mixed Reality (MR) setting for two spatial layouts. While active movement during MR remote collaboration is afforded by how the shared 3D space is mediated and configured, studies investigating the impact of these conditions on user experience have been scarce. In a between-group study -, a host user in Augmented Reality (AR) and a remote user in Virtual Reality (VR), both wearing Head Mounted Displays (HMDs), simultaneously moved around the shared space to find and assemble parts of a Mars exploration rover together, one group in a Peripheral layout and the other in a Scattered layout with disparate levels of spatial affordance. Results show that while VR facilitates higher co-presence and spatial presence than AR through HMDs, the Peripheral layout enables users to pay more attention to one another than the Scattered. We analyze the results and derive implications aimed at bridging the AR-VR gap in social presence for dynamic MR remote collaboration through the adaptive placement of virtual content in shared spaces.",
"fno": "532500a394",
"keywords": [
"Augmented Reality",
"Groupware",
"Helmet Mounted Displays",
"Mars",
"Virtual Reality",
"Augmented Reality",
"Between Group Study",
"Dynamic MR Remote Collaboration",
"Dynamic Remote Collaboration Task",
"Higher Co Presence",
"Host User",
"Peripheral Layout",
"Remote User",
"Scattered Layout",
"Shared 3 D Space",
"Shared Space",
"Social Presence",
"Spatial Affordance",
"Spatial Layout",
"Spatial Presence",
"Technologically Asymmetric Mixed Reality",
"User Experience",
"Virtual Reality",
"Wearing Head Mounted Displays",
"Space Vehicles",
"Three Dimensional Displays",
"Affordances",
"Layout",
"Collaboration",
"Mixed Reality",
"User Experience",
"Human Centered Computing",
"Human Computer Interaction",
"Interaction Paradigms",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "KAIST KI-ITC ARRC",
"fullName": "Jae-Eun Shin",
"givenName": "Jae-Eun",
"surname": "Shin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Boram Yoon",
"givenName": "Boram",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Dooyoung Kim",
"givenName": "Dooyoung",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Hyung-Il Kim",
"givenName": "Hyung-Il",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Woontack Woo",
"givenName": "Woontack",
"surname": "Woo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "394-403",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a384",
"articleId": "1JrQZPMRLW0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a404",
"articleId": "1JrRlimqMKc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948517",
"title": "Collaboration in mediated and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948517/12OmNy6HQPU",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a940",
"title": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a940/1CJelpv0Txm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cste/2022/8188/0/818800a082",
"title": "Integrating Inquiry-Based Pedagogy with Mixed Reality: Theories and Practices",
"doi": null,
"abstractUrl": "/proceedings-article/cste/2022/818800a082/1J7VZM9bxDi",
"parentPublication": {
"id": "proceedings/cste/2022/8188/0",
"title": "2022 4th International Conference on Computer Science and Technologies in Education (CSTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a674",
"title": "Mutual Space Generation with Relative Translation Gains in Redirected Walking for Asymmetric Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a674/1J7WiJIlxza",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a865",
"title": "Learning and Teaching Fluid Dynamics using Augmented and Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a865/1J7Wr5spc76",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049710",
"title": "Exploring Plausibility and Presence in Mixed Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049710/1KYoplRZLWM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a520",
"title": "Evaluating Remote Virtual Hands Models on Social Presence in Hand-based 3D Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a520/1pysxMcaE2Q",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a653",
"title": "Adjusting Relative Translation Gains According to Space Size in Redirected Walking for Mixed Reality Mutual Space Generation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a653/1tuANZ6Iz3q",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a435",
"title": "Multi-scale Mixed Reality Collaboration for Digital Twin",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a435/1yeQLyb4LpC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysneD006s",
"doi": "10.1109/ISMAR-Adjunct.2019.00038",
"title": "An MR Remote Collaborative Platform Based on 3D CAD Models for Training in Industry",
"normalizedTitle": "An MR Remote Collaborative Platform Based on 3D CAD Models for Training in Industry",
"abstract": "In this paper, we describe a new Mixed Reality (MR) remote collaborative platform making use of 3D CAD models for training in the manufacturing industry. It enables a remote expert in Virtual Reality (VR) to train a local worker in a physical assembly task. For the local site, we use Spatial Augmented Reality (SAR) to enable the local worker see virtual cues without wearing any AR devices, leaving their user hands free to easily manipulate the physical parts. For the remote expert, we construct a 3D virtual environment using virtual replicas of the physical parts. We also report on the results of a usability study of the prototype.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we describe a new Mixed Reality (MR) remote collaborative platform making use of 3D CAD models for training in the manufacturing industry. It enables a remote expert in Virtual Reality (VR) to train a local worker in a physical assembly task. For the local site, we use Spatial Augmented Reality (SAR) to enable the local worker see virtual cues without wearing any AR devices, leaving their user hands free to easily manipulate the physical parts. For the remote expert, we construct a 3D virtual environment using virtual replicas of the physical parts. We also report on the results of a usability study of the prototype.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we describe a new Mixed Reality (MR) remote collaborative platform making use of 3D CAD models for training in the manufacturing industry. It enables a remote expert in Virtual Reality (VR) to train a local worker in a physical assembly task. For the local site, we use Spatial Augmented Reality (SAR) to enable the local worker see virtual cues without wearing any AR devices, leaving their user hands free to easily manipulate the physical parts. For the remote expert, we construct a 3D virtual environment using virtual replicas of the physical parts. We also report on the results of a usability study of the prototype.",
"fno": "476500a091",
"keywords": [
"Augmented Reality",
"CAD",
"Computer Based Training",
"Groupware",
"Manufacturing Industries",
"Production Engineering Computing",
"Solid Modelling",
"MR Remote Collaborative Platform",
"3 D CAD Models",
"Manufacturing Industry",
"Remote Expert",
"Virtual Reality",
"Physical Assembly Task",
"Spatial Augmented Reality",
"3 D Virtual Environment",
"Mixed Reality Remote Collaborative Platform",
"Training In Industry",
"Three Dimensional Displays",
"Training",
"Collaboration",
"Solid Modeling",
"Task Analysis",
"Virtual Reality",
"Usability",
"Remote Collaboration",
"3 D CAD Models",
"Training In The Industry",
"Augmented Reality",
"Mixed Reality"
],
"authors": [
{
"affiliation": "Northwestern Polytechnical University, China; Cyber-Reality Innovation Center, China",
"fullName": "Peng Wang",
"givenName": "Peng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China; Cyber-Reality Innovation Center, China",
"fullName": "Xiaoliang Bai",
"givenName": "Xiaoliang",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China; Cyber-Reality Innovation Center, China; University of South Australia",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China; Cyber-Reality Innovation Center, China",
"fullName": "Shusheng Zhang",
"givenName": "Shusheng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China; Cyber-Reality Innovation Center, China",
"fullName": "Dechuan Han",
"givenName": "Dechuan",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China, Cyber-Reality Innovation Center, China",
"fullName": "Hao Lv",
"givenName": "Hao",
"surname": "Lv",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China, Cyber-Reality Innovation Center, China",
"fullName": "Weiping He",
"givenName": "Weiping",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China, Cyber-Reality Innovation Center, China",
"fullName": "Yuxiang Yan",
"givenName": "Yuxiang",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China, Cyber-Reality Innovation Center, China",
"fullName": "Xiangyu Zhang",
"givenName": "Xiangyu",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, China, Cyber-Reality Innovation Center, China",
"fullName": "Haitao Min",
"givenName": "Haitao",
"surname": "Min",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "91-92",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a089",
"articleId": "1gysihxRfB6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a093",
"articleId": "1gysmQZKCFq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/saint/2009/3700/0/3700a043",
"title": "An Instruction Method for Displaying Trajectory of an Object in Remote Collaborative MR on the Basis of Changes in Relative Coordinates",
"doi": null,
"abstractUrl": "/proceedings-article/saint/2009/3700a043/12OmNBBhN5d",
"parentPublication": {
"id": "proceedings/saint/2009/3700/0",
"title": "2009 Ninth Annual International Symposium on Applications and the Internet",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643588",
"title": "Augmentation of check in/out model for remote collaboration with Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643588/12OmNC4eSy7",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671839",
"title": "Towards object based manipulation in remote guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671839/12OmNxd4tri",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2012/4651/0/4651a663",
"title": "Instruction for Remote MR Cooperative Work with Captured Still Worker's View Video",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2012/4651a663/12OmNxdDFSs",
"parentPublication": {
"id": "proceedings/aina/2012/4651/0",
"title": "2012 IEEE 26th International Conference on Advanced Information Networking and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480753",
"title": "Symmetric Model of Remote Collaborative MR Using Tangible Replicas",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480753/12OmNyL0TDr",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dcve/2015/1840/0/07153591",
"title": "Remote collaboration across heterogeneous large interactive spaces",
"doi": null,
"abstractUrl": "/proceedings-article/3dcve/2015/07153591/12OmNzaQoHO",
"parentPublication": {
"id": "proceedings/3dcve/2015/1840/0",
"title": "2015 IEEE Second VR International Workshop on Collaborative Virtual Environments (3DCVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a766",
"title": "MR-RIEW: An MR Toolkit for Designing Remote Immersive Experiment Workflows",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a766/1CJemuUb5Be",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a327",
"title": "Augmented Virtuality Training for Special Education Teachers",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a327/1J7WbAdfchq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798024",
"title": "Head Pointer or Eye Gaze: Which Helps More in MR Remote Collaboration?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798024/1cJ0MmguvG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a520",
"title": "Evaluating Remote Virtual Hands Models on Social Presence in Hand-based 3D Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a520/1pysxMcaE2Q",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXsEDjkKQ",
"doi": "10.1109/VRW52623.2021.00209",
"title": "Remote Assistance with Mixed Reality for Procedural Tasks",
"normalizedTitle": "Remote Assistance with Mixed Reality for Procedural Tasks",
"abstract": "We present a volumetric communication system that is designed for remote assistance of procedural tasks. The system allows a remote expert to visually guide a local operator. The two parties share a view that is spatially identical, but for the local operator it is of the object on which they operate, while for the remote expert, the object is presented as a mixed reality \"hologram\". Guidance is provided by voice, gestures, and annotations performed directly on the object of interest or its hologram. At each end of the communication, spatial information is visualized using mixed-reality glasses.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a volumetric communication system that is designed for remote assistance of procedural tasks. The system allows a remote expert to visually guide a local operator. The two parties share a view that is spatially identical, but for the local operator it is of the object on which they operate, while for the remote expert, the object is presented as a mixed reality \"hologram\". Guidance is provided by voice, gestures, and annotations performed directly on the object of interest or its hologram. At each end of the communication, spatial information is visualized using mixed-reality glasses.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a volumetric communication system that is designed for remote assistance of procedural tasks. The system allows a remote expert to visually guide a local operator. The two parties share a view that is spatially identical, but for the local operator it is of the object on which they operate, while for the remote expert, the object is presented as a mixed reality \"hologram\". Guidance is provided by voice, gestures, and annotations performed directly on the object of interest or its hologram. At each end of the communication, spatial information is visualized using mixed-reality glasses.",
"fno": "405700a653",
"keywords": [
"Augmented Reality",
"Gesture Recognition",
"Virtual Reality",
"Remote Assistance",
"Procedural Tasks",
"Volumetric Communication System",
"Remote Expert",
"Local Operator",
"Mixed Reality Hologram",
"Mixed Reality Glasses",
"Visualization",
"Three Dimensional Displays",
"Communication Systems",
"Annotations",
"Conferences",
"Mixed Reality",
"Medical Services",
"Remote Procedure",
"Augmented Reality",
"Virtual Presence"
],
"authors": [
{
"affiliation": "American University",
"fullName": "Manuel Rebol",
"givenName": "Manuel",
"surname": "Rebol",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "George Washington University",
"fullName": "Colton Hood",
"givenName": "Colton",
"surname": "Hood",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "George Washington University",
"fullName": "Claudia Ranniger",
"givenName": "Claudia",
"surname": "Ranniger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "George Washington University",
"fullName": "Adam Rutenberg",
"givenName": "Adam",
"surname": "Rutenberg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "George Washington University",
"fullName": "Neal Sikka",
"givenName": "Neal",
"surname": "Sikka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "American University",
"fullName": "Erin Maria Horan",
"givenName": "Erin Maria",
"surname": "Horan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graz University of Technology",
"fullName": "Christian Gütl",
"givenName": "Christian",
"surname": "Gütl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "American University",
"fullName": "Krzysztof Pietroszek",
"givenName": "Krzysztof",
"surname": "Pietroszek",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "653-654",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a651",
"articleId": "1tnXmFNwhZ6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a655",
"articleId": "1tnY70dT4cM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550237",
"title": "Poster: 3D referencing for remote task assistance in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550237/12OmNqC2uWf",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dcve/2016/2138/0/07563559",
"title": "Vishnu: virtual immersive support for HelpiNg users an interaction paradigm for collaborative remote guiding in mixed reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dcve/2016/07563559/12OmNxG1yHz",
"parentPublication": {
"id": "proceedings/3dcve/2016/2138/0",
"title": "2016 IEEE Third VR International Workshop on Collaborative Virtual Environments (3DCVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671839",
"title": "Towards object based manipulation in remote guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671839/12OmNxd4tri",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2012/4651/0/4651a663",
"title": "Instruction for Remote MR Cooperative Work with Captured Still Worker's View Video",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2012/4651a663/12OmNxdDFSs",
"parentPublication": {
"id": "proceedings/aina/2012/4651/0",
"title": "2012 IEEE 26th International Conference on Advanced Information Networking and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a698",
"title": "Understanding the Capabilities of the HoloLens 1 and 2 in a Mixed Reality Environment for Direct Volume Rendering with a Ray-casting Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a698/1CJdm6qNGJq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a538",
"title": "Effects of Clutching Mechanism on Remote Object Manipulation Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a538/1CJf9GYjHMc",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a346",
"title": "Mixed Reality Communication for Medical Procedures: Teaching the Placement of a Central Venous Catheter",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a346/1JrR1uZty2k",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049700",
"title": "Using Virtual Replicas to Improve Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049700/1KYoAxyw5c4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049704",
"title": "A Survey on Remote Assistance and Training in Mixed Reality Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049704/1KYowCHxUtO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a393",
"title": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a393/1gysjIlsYus",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1xPsim7PuRq",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"acronym": "mipr",
"groupId": "1825825",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1xPsmX6Ouvm",
"doi": "10.1109/MIPR51284.2021.00074",
"title": "Detecting and Preventing Faked Mixed Reality",
"normalizedTitle": "Detecting and Preventing Faked Mixed Reality",
"abstract": "Virtualized collaboration can significantly increase remote management of critical infrastructures. Crises such as the current COVID-19 pandemic push the technology: they require remote management to keep our infrastructures running. Mixed Reality (MR) prototypes enable remote management in diverse fields such as medicine, industry 4.0, energy systems, education, or cyber awareness. However, the evolution of virtualized collaboration is still in the beginning. By design, MR is fake: its reality is generated from models. This makes detecting attacks very difficult. Many MR-attacks result from well-known cybersecurity threats. This paper identifies classic attack surfaces, vectors, and concrete threats that are relevant for MR. It presents mitigation methods that can help to secure the underlying data exchanges. However, distributed systems are often heterogeneous and under different management authorities, making securing the entire virtualized remote management stack difficult. The paper therefore also introduces considerations towards an MR-client-based attack detection, i.e., MR-forensics, including relevant features and the use of machine learning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtualized collaboration can significantly increase remote management of critical infrastructures. Crises such as the current COVID-19 pandemic push the technology: they require remote management to keep our infrastructures running. Mixed Reality (MR) prototypes enable remote management in diverse fields such as medicine, industry 4.0, energy systems, education, or cyber awareness. However, the evolution of virtualized collaboration is still in the beginning. By design, MR is fake: its reality is generated from models. This makes detecting attacks very difficult. Many MR-attacks result from well-known cybersecurity threats. This paper identifies classic attack surfaces, vectors, and concrete threats that are relevant for MR. It presents mitigation methods that can help to secure the underlying data exchanges. However, distributed systems are often heterogeneous and under different management authorities, making securing the entire virtualized remote management stack difficult. The paper therefore also introduces considerations towards an MR-client-based attack detection, i.e., MR-forensics, including relevant features and the use of machine learning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtualized collaboration can significantly increase remote management of critical infrastructures. Crises such as the current COVID-19 pandemic push the technology: they require remote management to keep our infrastructures running. Mixed Reality (MR) prototypes enable remote management in diverse fields such as medicine, industry 4.0, energy systems, education, or cyber awareness. However, the evolution of virtualized collaboration is still in the beginning. By design, MR is fake: its reality is generated from models. This makes detecting attacks very difficult. Many MR-attacks result from well-known cybersecurity threats. This paper identifies classic attack surfaces, vectors, and concrete threats that are relevant for MR. It presents mitigation methods that can help to secure the underlying data exchanges. However, distributed systems are often heterogeneous and under different management authorities, making securing the entire virtualized remote management stack difficult. The paper therefore also introduces considerations towards an MR-client-based attack detection, i.e., MR-forensics, including relevant features and the use of machine learning.",
"fno": "186500a399",
"keywords": [
"Computer Network Security",
"Virtual Reality",
"Faked Mixed Reality",
"Virtualized Collaboration",
"Critical Infrastructures",
"COVID 19 Pandemic",
"Mixed Reality Prototypes",
"Attack Surfaces",
"MR Client Based Attack Detection",
"Virtualized Remote Management Stack",
"Cybersecurity Threats",
"Pandemics",
"Forensics",
"Pipelines",
"Mixed Reality",
"Collaboration",
"Prototypes",
"Virtual Reality",
"Cybersecurity",
"Mixed Reality",
"Remote Management",
"Deepfake",
"MR Forensics"
],
"authors": [
{
"affiliation": "Technical University of Munich",
"fullName": "Fabian Kilger",
"givenName": "Fabian",
"surname": "Kilger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IMT Atlantique",
"fullName": "Alexandre Kabil",
"givenName": "Alexandre",
"surname": "Kabil",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer Gesellschaft",
"fullName": "Volker Tippmann",
"givenName": "Volker",
"surname": "Tippmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Gudrun Klinker",
"givenName": "Gudrun",
"surname": "Klinker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IMT Atlantique",
"fullName": "Marc-Oliver Pahl",
"givenName": "Marc-Oliver",
"surname": "Pahl",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "399-405",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1865-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "186500a393",
"articleId": "1xPslfjg9Es",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "186500a406",
"articleId": "1xPsqAR1G8w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a064",
"title": "[POSTER] Remote Mixed Reality System Supporting Interactions with Virtualized Objects",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a064/12OmNzJbQY0",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a940",
"title": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a940/1CJelpv0Txm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a899",
"title": "CADET: A Collaborative Agile Data Exploration Tool for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a899/1J7WfLETGnu",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a865",
"title": "Learning and Teaching Fluid Dynamics using Augmented and Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a865/1J7Wr5spc76",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a923",
"title": "Cross Reality Authoring: A Mixed Reality Editor approach",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a923/1J7WtZdBjig",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049704",
"title": "A Survey on Remote Assistance and Training in Mixed Reality Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049704/1KYowCHxUtO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a009",
"title": "Egocentric viewpoint in mixed reality situated visualization: challenges and opportunities",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a009/1rSRe0PXIgo",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a450",
"title": "Interactive Context-Aware Furniture Recommendation using Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a450/1tnWO8nmeeA",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a274",
"title": "A Mixed-Reality System to Promote Child Engagement in Remote Intergenerational Storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a274/1yeQMxSyLp6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQMxSyLp6",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00063",
"title": "A Mixed-Reality System to Promote Child Engagement in Remote Intergenerational Storytelling",
"normalizedTitle": "A Mixed-Reality System to Promote Child Engagement in Remote Intergenerational Storytelling",
"abstract": "We present a mixed reality (MR) storytelling system designed specifically for multi-generational collaboration with child engagement as a key focus. Our \"Let’s Make a Story\" system comprises a two-sided experience that brings together a remote adult and child to tell a story collaboratively. The child has a mixed reality phone-based application with an augmented manipulative that controls the story’s main character. The remote adult participates through a web-based interface. The adult reads the story to the child and helps the child play the story game by providing them with items they need to clear the scenes. In this paper, we detail the implementation of our system and the results of a user study. Eight remote adult-child pairs experienced both the MR and a traditional paper-based storytelling system. To measure engagement, we used questionnaire analysis, engagement time with the story activity, and the word count of the child’s description of how the story should end. We found that children uniformly preferred the MR system, spent more time engaged with the MR system, and used more words to describe how the story should end incorporating details from the game.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a mixed reality (MR) storytelling system designed specifically for multi-generational collaboration with child engagement as a key focus. Our \"Let’s Make a Story\" system comprises a two-sided experience that brings together a remote adult and child to tell a story collaboratively. The child has a mixed reality phone-based application with an augmented manipulative that controls the story’s main character. The remote adult participates through a web-based interface. The adult reads the story to the child and helps the child play the story game by providing them with items they need to clear the scenes. In this paper, we detail the implementation of our system and the results of a user study. Eight remote adult-child pairs experienced both the MR and a traditional paper-based storytelling system. To measure engagement, we used questionnaire analysis, engagement time with the story activity, and the word count of the child’s description of how the story should end. We found that children uniformly preferred the MR system, spent more time engaged with the MR system, and used more words to describe how the story should end incorporating details from the game.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a mixed reality (MR) storytelling system designed specifically for multi-generational collaboration with child engagement as a key focus. Our \"Let’s Make a Story\" system comprises a two-sided experience that brings together a remote adult and child to tell a story collaboratively. The child has a mixed reality phone-based application with an augmented manipulative that controls the story’s main character. The remote adult participates through a web-based interface. The adult reads the story to the child and helps the child play the story game by providing them with items they need to clear the scenes. In this paper, we detail the implementation of our system and the results of a user study. Eight remote adult-child pairs experienced both the MR and a traditional paper-based storytelling system. To measure engagement, we used questionnaire analysis, engagement time with the story activity, and the word count of the child’s description of how the story should end. We found that children uniformly preferred the MR system, spent more time engaged with the MR system, and used more words to describe how the story should end incorporating details from the game.",
"fno": "129800a274",
"keywords": [
"Computer Games",
"Internet",
"Mobile Computing",
"Social Sciences Computing",
"User Interfaces",
"Virtual Reality",
"Child Engagement",
"Remote Intergenerational Storytelling",
"Mixed Reality Storytelling System",
"Multigenerational Collaboration",
"Mixed Reality Phone Based Application",
"Web Based Interface",
"Story Game",
"Remote Adult Child Pairs",
"MR System",
"Mixed Reality",
"Collaboration",
"Virtual Reality",
"Games",
"Time Measurement",
"Augmented Reality",
"Human Centered Computing",
"Augmented Reality",
"Story Telling",
"Human Centered Interface",
"Human Factors",
"Family Communications"
],
"authors": [
{
"affiliation": "Adobe Research",
"fullName": "Jennifer Healey",
"givenName": "Jennifer",
"surname": "Healey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland College Park",
"fullName": "Duotun Wang",
"givenName": "Duotun",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Curtis Wigington",
"givenName": "Curtis",
"surname": "Wigington",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Tong Sun",
"givenName": "Tong",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland College Park",
"fullName": "Huaishu Peng",
"givenName": "Huaishu",
"surname": "Peng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "274-279",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129800a268",
"articleId": "1yeQL1J7Xsk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a280",
"articleId": "1yfxMQp0v3W",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223367",
"title": "An experimental study on the virtual representation of children",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223367/12OmNvkGVZf",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a326",
"title": "Workshop on highly diverse cameras and displays for mixed and augmented reality (HDCD4MAR)",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a326/12OmNzlD9rq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2001/1375/0/13750169",
"title": "MR2 (MR Square): A Mixed-Reality Meeting Room",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750169/12OmNzyYibC",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2011/03/tlt2011030249",
"title": "A Mobile Mixed-Reality Environment for Children's Storytelling Using a Handheld Projector and a Robot",
"doi": null,
"abstractUrl": "/journal/lt/2011/03/tlt2011030249/13rRUxC0SIH",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a899",
"title": "CADET: A Collaborative Agile Data Exploration Tool for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a899/1J7WfLETGnu",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a266",
"title": "Toward Methods To Develop Experience Measurements For Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a266/1J7WhrNYPh6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a435",
"title": "Learn Spatial! Introducing the MARBLE-App - A Mixed Reality Approach to Enhance Archaeological Higher Education",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a435/1J7WmNmjy1i",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciddt/2020/0367/0/036700a367",
"title": "Research of interactive experience display of child safety seats based on Mixed Reality technology",
"doi": null,
"abstractUrl": "/proceedings-article/iciddt/2020/036700a367/1wutF1SqGty",
"parentPublication": {
"id": "proceedings/iciddt/2020/0367/0",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a399",
"title": "Detecting and Preventing Faked Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a399/1xPsmX6Ouvm",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyQ7FQL",
"title": "Proceedings. First Joint Eurohaptics Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems. World Haptics Conference",
"acronym": "whc",
"groupId": "1001635",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvlxJoQ",
"doi": "10.1109/WHC.2005.96",
"title": "Motor Skill Training Assistance Using Haptic Attributes",
"normalizedTitle": "Motor Skill Training Assistance Using Haptic Attributes",
"abstract": "In this paper we describe our efforts to develop a new strategy for providing assistance using haptics in a virtual environment when training for a motor skill. Using a record and play strategy, the proposed assistance method will provide closest possible replication of expert's skill. We have defined a new paradigm called \"Haptic Attributes\" where we relate a unique haptic force profile to every task performed using motor skills. This has been combined with an earlier concept called Sympathetic Haptic to develop a new paradigm in training complex skill based tasks such as writing, surgery or playing musical instruments. As a demonstration, a virtual environment that can be used for training handwriting was designed and implemented. Position based feedback assistance and training with no assistance were tested against our method in a series of human subject tests. Results prove our method to be superior to training methods tested which use position based or no assistance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we describe our efforts to develop a new strategy for providing assistance using haptics in a virtual environment when training for a motor skill. Using a record and play strategy, the proposed assistance method will provide closest possible replication of expert's skill. We have defined a new paradigm called \"Haptic Attributes\" where we relate a unique haptic force profile to every task performed using motor skills. This has been combined with an earlier concept called Sympathetic Haptic to develop a new paradigm in training complex skill based tasks such as writing, surgery or playing musical instruments. As a demonstration, a virtual environment that can be used for training handwriting was designed and implemented. Position based feedback assistance and training with no assistance were tested against our method in a series of human subject tests. Results prove our method to be superior to training methods tested which use position based or no assistance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we describe our efforts to develop a new strategy for providing assistance using haptics in a virtual environment when training for a motor skill. Using a record and play strategy, the proposed assistance method will provide closest possible replication of expert's skill. We have defined a new paradigm called \"Haptic Attributes\" where we relate a unique haptic force profile to every task performed using motor skills. This has been combined with an earlier concept called Sympathetic Haptic to develop a new paradigm in training complex skill based tasks such as writing, surgery or playing musical instruments. As a demonstration, a virtual environment that can be used for training handwriting was designed and implemented. Position based feedback assistance and training with no assistance were tested against our method in a series of human subject tests. Results prove our method to be superior to training methods tested which use position based or no assistance.",
"fno": "23100452",
"keywords": [],
"authors": [
{
"affiliation": "State University of New York at Buffalo",
"fullName": "Govindarajan Srimathveeravalli",
"givenName": "Govindarajan",
"surname": "Srimathveeravalli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State University of New York at Buffalo",
"fullName": "Kesavadas Thenkurussi",
"givenName": "Kesavadas",
"surname": "Thenkurussi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "whc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-03-01T00:00:00",
"pubType": "proceedings",
"pages": "452-457",
"year": "2005",
"issn": null,
"isbn": "0-7695-2310-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "23100446",
"articleId": "12OmNzC5T2s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "23100458",
"articleId": "12OmNrkBwqJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444635",
"title": "Effects of haptic guidance and disturbance on motor learning: Potential advantage of haptic disturbance",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444635/12OmNBtl1sT",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145145",
"title": "Haptic Feedback Enhances Force Skill Learning",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145145/12OmNrNh0Ci",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260069",
"title": "Haptic Attributes and Human Motor Skills",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260069/12OmNvSKNDj",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptic/2006/0226/0/01627128",
"title": "Haptic Attributes and Human Motor Skills",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627128/12OmNvpNIvU",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890040",
"title": "Haptic Guidance: Experimental Evaluation of a Haptic Training Method for a Perceptual Motor Skill",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890040/12OmNy68EJI",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479929",
"title": "Validating the Performance of Haptic Motor Skill Training",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479929/12OmNz5apNY",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2012/4702/0/4702a332",
"title": "Feedback in the Motor Skill Domain",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a332/12OmNzIUfYD",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2009/3858/0/04810805",
"title": "Performance improvement with haptic assistance: A quantitative assessment",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2009/04810805/12OmNzd7bWL",
"parentPublication": {
"id": "proceedings/whc/2009/3858/0",
"title": "World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/02/06701132",
"title": "Motor Learning Perspectives on Haptic Training for the Upper Extremities",
"doi": null,
"abstractUrl": "/journal/th/2014/02/06701132/13rRUNvyaf8",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/03/06750738",
"title": "Effect of Haptic Assistance on Learning Vehicle Reverse Parking Skills",
"doi": null,
"abstractUrl": "/journal/th/2014/03/06750738/13rRUxZRbo8",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNApcuag",
"title": "IEEE Haptics Symposium 2008",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz5apNY",
"doi": "10.1109/HAPTICS.2008.4479929",
"title": "Validating the Performance of Haptic Motor Skill Training",
"normalizedTitle": "Validating the Performance of Haptic Motor Skill Training",
"abstract": "The effect of haptic interfaces on motor skill training has been widely studied. However, relatively little is known about whether haptic training can promote long-term motor skill acquisition. In this paper, we report two experimental studies that investigated the effectiveness of visuohaptic (visual + haptic) interfaces in helping people develop short-term and long-term motor skills. Our first study compared training outcomes of visuohaptic training, visual training, and no-assistance training. We found that the training outcomes for the tested methods were similar when helping participants develop short-term motor skills. Our second experiment assessed the potential of visual training and visuohaptic training in promoting the development of long-term motor skills. Participants were trained during a four-day-long period. The results showed that the participants gained long-term skills through both training methods, and that the training outcomes for both methods were similar. The results also showed that visuohaptic training is a promising method, but that it needs to be further developed to be useful.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The effect of haptic interfaces on motor skill training has been widely studied. However, relatively little is known about whether haptic training can promote long-term motor skill acquisition. In this paper, we report two experimental studies that investigated the effectiveness of visuohaptic (visual + haptic) interfaces in helping people develop short-term and long-term motor skills. Our first study compared training outcomes of visuohaptic training, visual training, and no-assistance training. We found that the training outcomes for the tested methods were similar when helping participants develop short-term motor skills. Our second experiment assessed the potential of visual training and visuohaptic training in promoting the development of long-term motor skills. Participants were trained during a four-day-long period. The results showed that the participants gained long-term skills through both training methods, and that the training outcomes for both methods were similar. The results also showed that visuohaptic training is a promising method, but that it needs to be further developed to be useful.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The effect of haptic interfaces on motor skill training has been widely studied. However, relatively little is known about whether haptic training can promote long-term motor skill acquisition. In this paper, we report two experimental studies that investigated the effectiveness of visuohaptic (visual + haptic) interfaces in helping people develop short-term and long-term motor skills. Our first study compared training outcomes of visuohaptic training, visual training, and no-assistance training. We found that the training outcomes for the tested methods were similar when helping participants develop short-term motor skills. Our second experiment assessed the potential of visual training and visuohaptic training in promoting the development of long-term motor skills. Participants were trained during a four-day-long period. The results showed that the participants gained long-term skills through both training methods, and that the training outcomes for both methods were similar. The results also showed that visuohaptic training is a promising method, but that it needs to be further developed to be useful.",
"fno": "04479929",
"keywords": [
"Computer Based Training",
"Force Feedback",
"Haptic Interfaces",
"Haptic Motor Skill Training",
"Motor Skill Acquisition",
"Visuohaptic Interfaces",
"Visual Haptic Interfaces",
"Visual Training",
"No Assistance Training",
"Haptic Interfaces",
"Shape Measurement",
"Auditory Displays",
"Imaging Phantoms",
"Learning Systems",
"Brushes",
"Force Measurement",
"Motion Measurement",
"Testing",
"Information Processing",
"Haptic Training",
"Visuohaptic",
"Motor Still",
"Short Term Learning",
"Long Term Learning",
"H 1 2 Models And Principles User Machine Systems Human Information Processing",
"Human Factors",
"H 5 2 Information Interfaces And Presentation User Interfaces Evaluation Methodology",
"Haptic I O"
],
"authors": [
{
"affiliation": "Department of Computing Science, University of Alberta, Canada, xingdong@cs.ualberta.ca",
"fullName": "Xing-Dong Yang",
"givenName": "Xing-Dong",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computing Science, University of Alberta, Canada, wfb@cs.ualberta.ca",
"fullName": "Walter F. Bischof",
"givenName": "Walter F.",
"surname": "Bischof",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computing Science, University of Alberta, Canada, pierreb@cs.ualberta.ca",
"fullName": "Pierre Boulanger",
"givenName": "Pierre",
"surname": "Boulanger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-03-01T00:00:00",
"pubType": "proceedings",
"pages": "129-135",
"year": "2008",
"issn": "2324-7347",
"isbn": "978-1-4244-2005-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04479913",
"articleId": "12OmNAolH2A",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04479914",
"articleId": "12OmNywxlFr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444635",
"title": "Effects of haptic guidance and disturbance on motor learning: Potential advantage of haptic disturbance",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444635/12OmNBtl1sT",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145145",
"title": "Haptic Feedback Enhances Force Skill Learning",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145145/12OmNrNh0Ci",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260069",
"title": "Haptic Attributes and Human Motor Skills",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260069/12OmNvSKNDj",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2005/2310/0/23100452",
"title": "Motor Skill Training Assistance Using Haptic Attributes",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2005/23100452/12OmNvlxJoQ",
"parentPublication": {
"id": "proceedings/whc/2005/2310/0",
"title": "Proceedings. First Joint Eurohaptics Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems. World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890040",
"title": "Haptic Guidance: Experimental Evaluation of a Haptic Training Method for a Perceptual Motor Skill",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890040/12OmNy68EJI",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145256",
"title": "The Effect of Virtual Haptic Training on Real Surgical Drilling Proficiency",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145256/12OmNyeECuX",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/02/06701132",
"title": "Motor Learning Perspectives on Haptic Training for the Upper Extremities",
"doi": null,
"abstractUrl": "/journal/th/2014/02/06701132/13rRUNvyaf8",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2018/7206/0/720600a005",
"title": "Motor Skill Improvement Tool for Apraxia",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2018/720600a005/181W9mmssvg",
"parentPublication": {
"id": "proceedings/chase/2018/7206/0",
"title": "2018 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049764",
"title": "Effects of Collaborative Training Using Virtual Co-embodiment on Motor Skill Learning",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049764/1KYox5WNvnW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a103",
"title": "The influence of hand visualization in tool-based motor-skills training, a longitudinal study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a103/1tuAYUEEQNi",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1AIBM1S",
"doi": "10.1109/VR.2018.8448293",
"title": "Studying the Sense of Embodiment in VR Shared Experiences",
"normalizedTitle": "Studying the Sense of Embodiment in VR Shared Experiences",
"abstract": "In this paper, we explore the influence of sharing a virtual environment with another user on the sense of embodiment in virtual reality. For this aim, we conducted an experiment where users were immersed in a virtual environment while being embodied in an anthropomorphic virtual representation of themselves. To evaluate the influence of the presence of another user, two situations were studied: either users were immersed alone, or in the company of another user. During the experiment, participants performed a virtual version of the well-known whac-a-mole game, therefore interacting with the virtual environment, while sitting at a virtual table. Our results show that users were significantly more “efficient” (i.e., faster reaction times), and accordingly more engaged, in performing the task when sharing the virtual environment, in particular for the more competitive tasks. Also, users experienced comparable levels of embodiment both when immersed alone or with another user. These results are supported by subjective questionnaires but also through behavioural responses, e.g. users reacting to the introduction of a threat towards their virtual body. Taken together, our results show that competition and shared experiences involving an avatar do not influence the sense of embodiment, but can increase user engagement. Such insights can be used by designers of virtual environments and virtual reality applications to develop more engaging applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we explore the influence of sharing a virtual environment with another user on the sense of embodiment in virtual reality. For this aim, we conducted an experiment where users were immersed in a virtual environment while being embodied in an anthropomorphic virtual representation of themselves. To evaluate the influence of the presence of another user, two situations were studied: either users were immersed alone, or in the company of another user. During the experiment, participants performed a virtual version of the well-known whac-a-mole game, therefore interacting with the virtual environment, while sitting at a virtual table. Our results show that users were significantly more “efficient” (i.e., faster reaction times), and accordingly more engaged, in performing the task when sharing the virtual environment, in particular for the more competitive tasks. Also, users experienced comparable levels of embodiment both when immersed alone or with another user. These results are supported by subjective questionnaires but also through behavioural responses, e.g. users reacting to the introduction of a threat towards their virtual body. Taken together, our results show that competition and shared experiences involving an avatar do not influence the sense of embodiment, but can increase user engagement. Such insights can be used by designers of virtual environments and virtual reality applications to develop more engaging applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we explore the influence of sharing a virtual environment with another user on the sense of embodiment in virtual reality. For this aim, we conducted an experiment where users were immersed in a virtual environment while being embodied in an anthropomorphic virtual representation of themselves. To evaluate the influence of the presence of another user, two situations were studied: either users were immersed alone, or in the company of another user. During the experiment, participants performed a virtual version of the well-known whac-a-mole game, therefore interacting with the virtual environment, while sitting at a virtual table. Our results show that users were significantly more “efficient” (i.e., faster reaction times), and accordingly more engaged, in performing the task when sharing the virtual environment, in particular for the more competitive tasks. Also, users experienced comparable levels of embodiment both when immersed alone or with another user. These results are supported by subjective questionnaires but also through behavioural responses, e.g. users reacting to the introduction of a threat towards their virtual body. Taken together, our results show that competition and shared experiences involving an avatar do not influence the sense of embodiment, but can increase user engagement. Such insights can be used by designers of virtual environments and virtual reality applications to develop more engaging applications.",
"fno": "08448293",
"keywords": [
"Avatars",
"Computer Games",
"Human Computer Interaction",
"Human Factors",
"VR Shared Experiences",
"Virtual Environment",
"Anthropomorphic Virtual Representation",
"Virtual Version",
"Virtual Table",
"Virtual Body",
"User Engagement",
"Virtual Reality Applications",
"Avatar",
"Avatars",
"Task Analysis",
"Virtual Environments",
"Visualization",
"Electronic Mail",
"Games",
"Human Centered Computing X 2015 Human Computer Interaction HCI X 2015 HCI Design And Evaluation Methods X 2500 User Studies",
"Human Centered Computing X 2015 Human Computer Interaction HCI X 2015 Interaction Paradigms X 2015 Virtual Reality"
],
"authors": [
{
"affiliation": "Inria, CNRS, IRISA, Univ Rennes",
"fullName": "Rebecca Fribourg",
"givenName": "Rebecca",
"surname": "Fribourg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria, CNRS, IRISA, Univ Rennes",
"fullName": "Ferran Argelaguet",
"givenName": "Ferran",
"surname": "Argelaguet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria, CNRS, IRISA, Univ Rennes",
"fullName": "Ludovic Hoyet",
"givenName": "Ludovic",
"surname": "Hoyet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria, CNRS, IRISA, Univ Rennes",
"fullName": "Anatole Lécuyer",
"givenName": "Anatole",
"surname": "Lécuyer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "273-280",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08447562",
"articleId": "13bd1sx4Zt3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08448292",
"articleId": "13bd1sv5NyG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223378",
"title": "Influence of avatar realism on stressful situation in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223378/12OmNBdruc4",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892278",
"title": "Bodiless embodiment: A descriptive survey of avatar bodily coherence in first-wave consumer VR applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892278/12OmNvnwVj4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714123",
"title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a503",
"title": "Studying “Avatar Transitions” in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a141",
"title": "Petting a cat helps you incarnate the avatar: Influence of the emotions over embodiment in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a141/1JrRepqALbW",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049764",
"title": "Effects of Collaborative Training Using Virtual Co-embodiment on Motor Skill Learning",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049764/1KYox5WNvnW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09105074",
"title": "Virtual Co-Embodiment: Evaluation of the Sense of Agency While Sharing the Control of a Virtual Body Among Two Individuals",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09105074/1kj0SvEe6ly",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09495125",
"title": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09495125/1vyju4jl6AE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAlT6IUfu",
"doi": "10.1109/VR50410.2021.00068",
"title": "Walking Outside the Box: Estimation of Detection Thresholds for Non-Forward Steps",
"normalizedTitle": "Walking Outside the Box: Estimation of Detection Thresholds for Non-Forward Steps",
"abstract": "Most virtual reality (VR) experiences are held in limited physical space; therefore, increasing the physical space's spatial efficiency is an essential task for the VR industry. Redirected walking maps a virtual path and a real path with unnoticeable distortion, enabling users to walk through a much bigger virtual space than physical space. To hide the distortion from the user, detection thresholds have been measured, entirely focusing on forward steps. However, it is not uncommon for the user to walk non-forward, that is, sideward and backward in VR. In addition to a forward step, adding options for a non-forward step can expand the VR locomotion in any direction. In this work, we measure the translation and curvature detection thresholds for non-forward steps. The results show similar translation detection thresholds with forward-step and wider detection thresholds for the curvature gain in both backward and sideward step experiments. Having sideward and backward steps in the redirected walking arsenal can add freedom to virtual world design and lead to efficient space usage.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most virtual reality (VR) experiences are held in limited physical space; therefore, increasing the physical space's spatial efficiency is an essential task for the VR industry. Redirected walking maps a virtual path and a real path with unnoticeable distortion, enabling users to walk through a much bigger virtual space than physical space. To hide the distortion from the user, detection thresholds have been measured, entirely focusing on forward steps. However, it is not uncommon for the user to walk non-forward, that is, sideward and backward in VR. In addition to a forward step, adding options for a non-forward step can expand the VR locomotion in any direction. In this work, we measure the translation and curvature detection thresholds for non-forward steps. The results show similar translation detection thresholds with forward-step and wider detection thresholds for the curvature gain in both backward and sideward step experiments. Having sideward and backward steps in the redirected walking arsenal can add freedom to virtual world design and lead to efficient space usage.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most virtual reality (VR) experiences are held in limited physical space; therefore, increasing the physical space's spatial efficiency is an essential task for the VR industry. Redirected walking maps a virtual path and a real path with unnoticeable distortion, enabling users to walk through a much bigger virtual space than physical space. To hide the distortion from the user, detection thresholds have been measured, entirely focusing on forward steps. However, it is not uncommon for the user to walk non-forward, that is, sideward and backward in VR. In addition to a forward step, adding options for a non-forward step can expand the VR locomotion in any direction. In this work, we measure the translation and curvature detection thresholds for non-forward steps. The results show similar translation detection thresholds with forward-step and wider detection thresholds for the curvature gain in both backward and sideward step experiments. Having sideward and backward steps in the redirected walking arsenal can add freedom to virtual world design and lead to efficient space usage.",
"fno": "255600a448",
"keywords": [
"Virtual Reality",
"Virtual Space",
"Forward Step",
"Nonforward Step",
"VR Locomotion",
"Curvature Detection Thresholds",
"Backward Steps",
"Redirected Walking Arsenal",
"Space Usage",
"Virtual Reality Experiences",
"Physical Space",
"VR Industry",
"Redirected Walking Maps",
"Virtual Path",
"Translation Detection Thresholds",
"Spatial Efficiency",
"Curvature Gain",
"Sideward Step",
"Virtual World Design",
"Legged Locomotion",
"Industries",
"Three Dimensional Displays",
"Focusing",
"Estimation",
"Virtual Reality",
"User Interfaces",
"Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality",
"Computing Methodologies Computer Graphics Graphics Systems And Interfaces Perception",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality"
],
"authors": [
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "Yong-Hun Cho",
"givenName": "Yong-Hun",
"surname": "Cho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "Dae-Hong Min",
"givenName": "Dae-Hong",
"surname": "Min",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "Jin-Suk Huh",
"givenName": "Jin-Suk",
"surname": "Huh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "Se-Hee Lee",
"givenName": "Se-Hee",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "June-Seop Yoon",
"givenName": "June-Seop",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "In-Kwon Lee",
"givenName": "In-Kwon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "448-454",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tuAlLrPZDy",
"name": "pvr202118380-09417724s1-mm_255600a448.zip",
"size": "14.7 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417724s1-mm_255600a448.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "255600a438",
"articleId": "1tuBuuWZCLe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a455",
"articleId": "1tuBfJZ11HG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549412",
"title": "Estimation of detection thresholds for acoustic based redirected walking techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a379",
"title": "Effects of Virtual Room Size and Objects on Relative Translation Gain Thresholds in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a379/1CJcsYYBYJi",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049692",
"title": "FREE-RDW: A Multi-user Redirected Walking Method for Supporting Non-forward Steps",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049692/1KYopXwY5Vu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798231",
"title": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798231/1cJ0KBrAUYE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798146",
"title": "Quick Estimation of Detection Thresholds for Redirected Walking with Method of Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798146/1cJ16Iq9YGc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798117",
"title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859537/1h0Jm3Gvypy",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a550",
"title": "Where are you? Influence of Redirected Walking on Audio-Visual Position Estimation of Co-Located Users",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a550/1tnWDmPDtHG",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrQUpt2CME",
"doi": "10.1109/ISMAR55827.2022.00026",
"title": "Towards Forecasting the Onset of Cybersickness by Fusing Physiological, Head-tracking and Eye-tracking with Multimodal Deep Fusion Network",
"normalizedTitle": "Towards Forecasting the Onset of Cybersickness by Fusing Physiological, Head-tracking and Eye-tracking with Multimodal Deep Fusion Network",
"abstract": "A plethora of studies has been conducted to detect and reduce cybersickness in real-time. However, prior attempts to detect and minimize cybersickness after its onset may be ineffective as the onset tends to persist beyond its first occurrence. By forecasting the onset of cybersickness, it may be possible to mitigate the severity of cybersickness through earlier interventions. This research proposed a multimodal deep fusion approach to forecast cybersickness from the user’s physiological, head-tracking, and eye-tracking data. We proposed several hybrid multimodal deep fusion neural networks with Long short-term memory (LSTMs), Neural basis expansion analysis for interpretable time series forecasting(NBEATs) and Deep Temporal Convolutional Networks(DeepTCN) neural models to forecast cybersickness 30-60s in advance to its onset. To validate our proposed approach, we recruited 30 participants who were immersed in five virtual reality simulations. We collected eye-tracking, head-tracking, heart rate, and galvanic skin response data and used the fast-motion scale as ground truth. Our results suggest that the DeepTCN model with our proposed multimodal fusion network can forecast cybersickness onset 60 seconds in advance with a root-mean-square error of 0.49 (on a scale from 0-10). Furthermore, our results demonstrated that fusing eye tracking, heart rate, and galvanic skin response data outperformed other data fusion approaches. This research clarifies how early cybersickness can be forecast, paving the way for future research on early cybersickness mitigation approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A plethora of studies has been conducted to detect and reduce cybersickness in real-time. However, prior attempts to detect and minimize cybersickness after its onset may be ineffective as the onset tends to persist beyond its first occurrence. By forecasting the onset of cybersickness, it may be possible to mitigate the severity of cybersickness through earlier interventions. This research proposed a multimodal deep fusion approach to forecast cybersickness from the user’s physiological, head-tracking, and eye-tracking data. We proposed several hybrid multimodal deep fusion neural networks with Long short-term memory (LSTMs), Neural basis expansion analysis for interpretable time series forecasting(NBEATs) and Deep Temporal Convolutional Networks(DeepTCN) neural models to forecast cybersickness 30-60s in advance to its onset. To validate our proposed approach, we recruited 30 participants who were immersed in five virtual reality simulations. We collected eye-tracking, head-tracking, heart rate, and galvanic skin response data and used the fast-motion scale as ground truth. Our results suggest that the DeepTCN model with our proposed multimodal fusion network can forecast cybersickness onset 60 seconds in advance with a root-mean-square error of 0.49 (on a scale from 0-10). Furthermore, our results demonstrated that fusing eye tracking, heart rate, and galvanic skin response data outperformed other data fusion approaches. This research clarifies how early cybersickness can be forecast, paving the way for future research on early cybersickness mitigation approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A plethora of studies has been conducted to detect and reduce cybersickness in real-time. However, prior attempts to detect and minimize cybersickness after its onset may be ineffective as the onset tends to persist beyond its first occurrence. By forecasting the onset of cybersickness, it may be possible to mitigate the severity of cybersickness through earlier interventions. This research proposed a multimodal deep fusion approach to forecast cybersickness from the user’s physiological, head-tracking, and eye-tracking data. We proposed several hybrid multimodal deep fusion neural networks with Long short-term memory (LSTMs), Neural basis expansion analysis for interpretable time series forecasting(NBEATs) and Deep Temporal Convolutional Networks(DeepTCN) neural models to forecast cybersickness 30-60s in advance to its onset. To validate our proposed approach, we recruited 30 participants who were immersed in five virtual reality simulations. We collected eye-tracking, head-tracking, heart rate, and galvanic skin response data and used the fast-motion scale as ground truth. Our results suggest that the DeepTCN model with our proposed multimodal fusion network can forecast cybersickness onset 60 seconds in advance with a root-mean-square error of 0.49 (on a scale from 0-10). Furthermore, our results demonstrated that fusing eye tracking, heart rate, and galvanic skin response data outperformed other data fusion approaches. This research clarifies how early cybersickness can be forecast, paving the way for future research on early cybersickness mitigation approaches.",
"fno": "532500a121",
"keywords": [
"Convolutional Neural Nets",
"Deep Learning Artificial Intelligence",
"Gaze Tracking",
"Mean Square Error Methods",
"Medical Image Processing",
"Physiology",
"Recurrent Neural Nets",
"Sensor Fusion",
"Time Series",
"Virtual Reality",
"Cybersickness Mitigation Approaches",
"Data Fusion Approaches",
"Deep Temporal Convolutional Networks",
"Deep TCN",
"Eye Tracking Data",
"Head Tracking",
"Hybrid Multimodal Deep Fusion Neural Networks",
"Long Short Term Memory",
"LST Ms",
"Multimodal Fusion Network",
"NBEA Ts",
"Neural Basis Expansion Analysis For Interpretable Time Series Forecasting",
"Root Mean Square Error",
"Users Physiological",
"Virtual Reality Simulations",
"Heart Rate",
"Solid Modeling",
"Cybersickness",
"Neural Networks",
"Time Series Analysis",
"Gaze Tracking",
"Predictive Models",
"Cybersickness",
"Forecasting",
"Eye Tracking",
"Physiological Data",
"Deep Fusion",
"Multimodal Deep Neural Network",
"Human Centered Computing",
"Visualization",
"Visualization Techniques",
"Treemaps",
"Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "Northeastern University",
"fullName": "Rifatul Islam",
"givenName": "Rifatul",
"surname": "Islam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at San Antonio",
"fullName": "Kevin Desai",
"givenName": "Kevin",
"surname": "Desai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at San Antonio",
"fullName": "John Quarles",
"givenName": "John",
"surname": "Quarles",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "121-130",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a112",
"articleId": "1JrRo67MnwQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a131",
"articleId": "1JrRdnGe43C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2023/05/10049731",
"title": "Cybersickness, Cognition, & Motor Skills: The Effects of Music, Gender, and Gaming Experience",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049731/1KYow8CUV20",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a609",
"title": "LiteVR: Interpretable and Lightweight Cybersickness Detection using Explainable AI",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a609/1MNgzF7scM0",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797748",
"title": "Developing an Accessible Evaluation Method of VR Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797748/1cJ17GWH4f6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089513",
"title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089551",
"title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089551/1jIx95ncylO",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090495",
"title": "Automatic Detection of Cybersickness from Physiological Signal in a Virtual Roller Coaster Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090495/1jIximIpClq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090494",
"title": "A Deep Learning based Framework for Detecting and Reducing onset of Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090494/1jIxuKp865y",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a400",
"title": "Automatic Detection and Prediction of Cybersickness Severity using Deep Neural Networks from user’s Physiological Signals",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a400/1pyswQ0oYOQ",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a123",
"title": "Exploring the feasibility of mitigating VR-HMD-induced cybersickness using cathodal transcranial direct current stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a123/1qpzDMNZnKo",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a031",
"title": "Cybersickness Prediction from Integrated HMD’s Sensors: A Multimodal Deep Fusion Approach using Eye-tracking and Head-tracking Data",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a031/1yeCV8NQEE0",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgzF7scM0",
"doi": "10.1109/VR55154.2023.00076",
"title": "LiteVR: Interpretable and Lightweight Cybersickness Detection using Explainable AI",
"normalizedTitle": "LiteVR: Interpretable and Lightweight Cybersickness Detection using Explainable AI",
"abstract": "Cybersickness is a common ailment associated with virtual reality (VR) user experiences. Several automated methods exist based on machine learning (ML) and deep learning (DL) to detect cyber-sickness. However, most of these cybersickness detection methods are perceived as computationally intensive and black-box methods. Thus, those techniques are neither trustworthy nor practical for deploying on standalone energy-constrained VR head-mounted devices (HMDs). In this work, we present an explainable artificial intelligence (XAI)-based framework Lite VR for cybersickness detection, explaining the model's outcome, reducing the feature dimensions, and overall computational costs. First, we develop three cybersick-ness DL models based on long-term short-term memory (LSTM), gated recurrent unit (GRU), and multilayer perceptron (MLP). Then, we employed a post-hoc explanation, such as SHapley Additive Explanations (SHAP), to explain the results and extract the most dominant features of cybersickness. Finally, we retrain the DL models with the reduced number of features. Our results show that eye-tracking features are the most dominant for cybersickness detection. Furthermore, based on the XAI-based feature ranking and dimensionality reduction, we significantly reduce the model's size by up to 4.3×, training time by up to 5.6×, and its inference time by up to 3.8×, with higher cybersickness detection accuracy and low regression error (i.e., on Fast Motion Scale (FMS)). Our proposed lite LSTM model obtained an accuracy of 94% in classifying cyber-sickness and regressing (i.e., FMS 1–10) with a Root Mean Square Error (RMSE) of 0.30, which outperforms the state-of-the-art. Our proposed Lite VR framework can help researchers and practitioners analyze, detect, and deploy their DL-based cybersickness detection models in standalone VR HMDs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cybersickness is a common ailment associated with virtual reality (VR) user experiences. Several automated methods exist based on machine learning (ML) and deep learning (DL) to detect cyber-sickness. However, most of these cybersickness detection methods are perceived as computationally intensive and black-box methods. Thus, those techniques are neither trustworthy nor practical for deploying on standalone energy-constrained VR head-mounted devices (HMDs). In this work, we present an explainable artificial intelligence (XAI)-based framework Lite VR for cybersickness detection, explaining the model's outcome, reducing the feature dimensions, and overall computational costs. First, we develop three cybersick-ness DL models based on long-term short-term memory (LSTM), gated recurrent unit (GRU), and multilayer perceptron (MLP). Then, we employed a post-hoc explanation, such as SHapley Additive Explanations (SHAP), to explain the results and extract the most dominant features of cybersickness. Finally, we retrain the DL models with the reduced number of features. Our results show that eye-tracking features are the most dominant for cybersickness detection. Furthermore, based on the XAI-based feature ranking and dimensionality reduction, we significantly reduce the model's size by up to 4.3×, training time by up to 5.6×, and its inference time by up to 3.8×, with higher cybersickness detection accuracy and low regression error (i.e., on Fast Motion Scale (FMS)). Our proposed lite LSTM model obtained an accuracy of 94% in classifying cyber-sickness and regressing (i.e., FMS 1–10) with a Root Mean Square Error (RMSE) of 0.30, which outperforms the state-of-the-art. Our proposed Lite VR framework can help researchers and practitioners analyze, detect, and deploy their DL-based cybersickness detection models in standalone VR HMDs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cybersickness is a common ailment associated with virtual reality (VR) user experiences. Several automated methods exist based on machine learning (ML) and deep learning (DL) to detect cyber-sickness. However, most of these cybersickness detection methods are perceived as computationally intensive and black-box methods. Thus, those techniques are neither trustworthy nor practical for deploying on standalone energy-constrained VR head-mounted devices (HMDs). In this work, we present an explainable artificial intelligence (XAI)-based framework Lite VR for cybersickness detection, explaining the model's outcome, reducing the feature dimensions, and overall computational costs. First, we develop three cybersick-ness DL models based on long-term short-term memory (LSTM), gated recurrent unit (GRU), and multilayer perceptron (MLP). Then, we employed a post-hoc explanation, such as SHapley Additive Explanations (SHAP), to explain the results and extract the most dominant features of cybersickness. Finally, we retrain the DL models with the reduced number of features. Our results show that eye-tracking features are the most dominant for cybersickness detection. Furthermore, based on the XAI-based feature ranking and dimensionality reduction, we significantly reduce the model's size by up to 4.3×, training time by up to 5.6×, and its inference time by up to 3.8×, with higher cybersickness detection accuracy and low regression error (i.e., on Fast Motion Scale (FMS)). Our proposed lite LSTM model obtained an accuracy of 94% in classifying cyber-sickness and regressing (i.e., FMS 1–10) with a Root Mean Square Error (RMSE) of 0.30, which outperforms the state-of-the-art. Our proposed Lite VR framework can help researchers and practitioners analyze, detect, and deploy their DL-based cybersickness detection models in standalone VR HMDs.",
"fno": "481500a609",
"keywords": [
"Training",
"Solid Modeling",
"Frequency Modulation",
"Three Dimensional Displays",
"Cybersickness",
"Computational Modeling",
"Gaze Tracking",
"Virtual Reality",
"Cybersickness Detection",
"Explainable Artificial Intelligence",
"Deep Learning",
"Model Reduction"
],
"authors": [
{
"affiliation": "University of Missouri",
"fullName": "Ripan Kumar Kundu",
"givenName": "Ripan Kumar",
"surname": "Kundu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kennesaw State University",
"fullName": "Rifatul Islam",
"givenName": "Rifatul",
"surname": "Islam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at San Antonio",
"fullName": "John Quarles",
"givenName": "John",
"surname": "Quarles",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Missouri",
"fullName": "Khaza Anuarul Hoque",
"givenName": "Khaza Anuarul",
"surname": "Hoque",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "609-619",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "481500a603",
"articleId": "1MNgKRrqL6g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a620",
"articleId": "1MNgG4plx6w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2022/5325/0/532500a121",
"title": "Towards Forecasting the Onset of Cybersickness by Fusing Physiological, Head-tracking and Eye-tracking with Multimodal Deep Fusion Network",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a121/1JrQUpt2CME",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a777",
"title": "TruVR: Trustworthy Cybersickness Detection using Explainable Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a777/1JrR1CsIUjC",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a307",
"title": "Demographic and Behavioral Correlates of Cybersickness: A Large Lab-in-the-Field Study of 837 Participants",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a307/1JrRjge0g6I",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049731",
"title": "Cybersickness, Cognition, & Motor Skills: The Effects of Music, Gender, and Gaming Experience",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049731/1KYow8CUV20",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089513",
"title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089551",
"title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089551/1jIx95ncylO",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a351",
"title": "A Review of Deep Learning Approaches to EEG-Based Classification of Cybersickness in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a351/1qpzzTXUIgw",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a156",
"title": "A new device to restore sensory congruency in virtual reality and to prevent cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a156/1tnWwDLMCAw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a373",
"title": "Using Fuzzy Logic to Involve Individual Differences for Predicting Cybersickness during VR Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a373/1tuAPQPWR2g",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a138",
"title": "Using Trajectory Compression Rate to Predict Changes in Cybersickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a138/1yeD4ffM0c8",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXecXunPa",
"doi": "10.1109/VRW52623.2021.00037",
"title": "Individual Differences & Task Attention in Cybersickness: A Call for a Standardized Approach to Data Sharing",
"normalizedTitle": "Individual Differences & Task Attention in Cybersickness: A Call for a Standardized Approach to Data Sharing",
"abstract": "Cybersickness research has lacked a focus on individual differences, particularly in the phases of cybersickness after initial sensitivity: the rate of adaptation during the stimulus, the rate of recovery after the stimulus, and the rate of habituation. Additionally, research has neglected the exploration of the impact of a task’s attentional load on cybersickness. Finally, addressing these gaps within the cybersickness research community could be more effective if more standardized, \"menu-driven\" experimental protocols could be agreed upon, as well as a set of reporting standards to expedite a deeper understanding of cybersickness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cybersickness research has lacked a focus on individual differences, particularly in the phases of cybersickness after initial sensitivity: the rate of adaptation during the stimulus, the rate of recovery after the stimulus, and the rate of habituation. Additionally, research has neglected the exploration of the impact of a task’s attentional load on cybersickness. Finally, addressing these gaps within the cybersickness research community could be more effective if more standardized, \"menu-driven\" experimental protocols could be agreed upon, as well as a set of reporting standards to expedite a deeper understanding of cybersickness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cybersickness research has lacked a focus on individual differences, particularly in the phases of cybersickness after initial sensitivity: the rate of adaptation during the stimulus, the rate of recovery after the stimulus, and the rate of habituation. Additionally, research has neglected the exploration of the impact of a task’s attentional load on cybersickness. Finally, addressing these gaps within the cybersickness research community could be more effective if more standardized, \"menu-driven\" experimental protocols could be agreed upon, as well as a set of reporting standards to expedite a deeper understanding of cybersickness.",
"fno": "405700a161",
"keywords": [
"Human Factors",
"Virtual Reality",
"Data Sharing",
"Initial Sensitivity",
"Cybersickness Research Community",
"Standardized Menu Driven",
"Reporting Standards",
"Individual Differences",
"Standardized Approach",
"Task Attention",
"Virtual Reality",
"Human Computer Interaction",
"Three Dimensional Displays",
"Sensitivity",
"Protocols",
"Cybersickness",
"Conferences",
"Task Analysis",
"Cybersickness",
"Individual Differences",
"Cognitive Load",
"Attention"
],
"authors": [
{
"affiliation": "Iowa State University",
"fullName": "Stephen B. Gilbert",
"givenName": "Stephen B.",
"surname": "Gilbert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Angelica Jasper",
"givenName": "Angelica",
"surname": "Jasper",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Nathan C. Sepich",
"givenName": "Nathan C.",
"surname": "Sepich",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Taylor A. Doty",
"givenName": "Taylor A.",
"surname": "Doty",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Jonathan W. Kelly",
"givenName": "Jonathan W.",
"surname": "Kelly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Michael C. Dorneich",
"givenName": "Michael C.",
"surname": "Dorneich",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "161-164",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a156",
"articleId": "1tnWwDLMCAw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a165",
"articleId": "1tnXfgszrMc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrw/2022/8402/0/840200a528",
"title": "Human Factors Related to Cybersickness Tolerance in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a528/1CJcDQEpCqA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a777",
"title": "TruVR: Trustworthy Cybersickness Detection using Explainable Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a777/1JrR1CsIUjC",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049731",
"title": "Cybersickness, Cognition, & Motor Skills: The Effects of Music, Gender, and Gaming Experience",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049731/1KYow8CUV20",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a561",
"title": "You Make Me Sick! The Effect of Stairs on Presence, Cybersickness, and Perception of Embodied Conversational Agents",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a561/1MNgq5zE1BS",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089551",
"title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089551/1jIx95ncylO",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a351",
"title": "A Review of Deep Learning Approaches to EEG-Based Classification of Cybersickness in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a351/1qpzzTXUIgw",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a156",
"title": "A new device to restore sensory congruency in virtual reality and to prevent cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a156/1tnWwDLMCAw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a165",
"title": "Subject 001 - A Detailed Self-Report of Virtual Reality Induced Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a165/1tnXfgszrMc",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a373",
"title": "Using Fuzzy Logic to Involve Individual Differences for Predicting Cybersickness during VR Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a373/1tuAPQPWR2g",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a138",
"title": "Using Trajectory Compression Rate to Predict Changes in Cybersickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a138/1yeD4ffM0c8",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1ANLL7CrjfG",
"title": "2021 Fifth IEEE International Conference on Robotic Computing (IRC)",
"acronym": "irc",
"groupId": "1819925",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1ANLPqybX1e",
"doi": "10.1109/IRC52146.2021.00029",
"title": "Object Detection and Segmentation using LiDAR-Camera Fusion for Autonomous Vehicle",
"normalizedTitle": "Object Detection and Segmentation using LiDAR-Camera Fusion for Autonomous Vehicle",
"abstract": "The Light detection and ranging (LiDAR) sensor plays a crucial role in perceiving the environment for an autonomous vehicle. But, in many scenarios LiDAR is unable to capture important information, for example, traffic light signals. This kind of scenario can be avoided by using camera images with LiDAR data. But, the system will not work effectively, if there is no proper calibration and synchronization between camera images and LiDAR data. In this paper, we have demonstrated a system, where objects are synchronously detected and segmented in both images and LiDAR data from KITTI datasets. Currently, the system is working in real-time using Robot Operating System (ROS) and can process up to 10 frames of image and point cloud data per second.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Light detection and ranging (LiDAR) sensor plays a crucial role in perceiving the environment for an autonomous vehicle. But, in many scenarios LiDAR is unable to capture important information, for example, traffic light signals. This kind of scenario can be avoided by using camera images with LiDAR data. But, the system will not work effectively, if there is no proper calibration and synchronization between camera images and LiDAR data. In this paper, we have demonstrated a system, where objects are synchronously detected and segmented in both images and LiDAR data from KITTI datasets. Currently, the system is working in real-time using Robot Operating System (ROS) and can process up to 10 frames of image and point cloud data per second.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Light detection and ranging (LiDAR) sensor plays a crucial role in perceiving the environment for an autonomous vehicle. But, in many scenarios LiDAR is unable to capture important information, for example, traffic light signals. This kind of scenario can be avoided by using camera images with LiDAR data. But, the system will not work effectively, if there is no proper calibration and synchronization between camera images and LiDAR data. In this paper, we have demonstrated a system, where objects are synchronously detected and segmented in both images and LiDAR data from KITTI datasets. Currently, the system is working in real-time using Robot Operating System (ROS) and can process up to 10 frames of image and point cloud data per second.",
"fno": "341600a123",
"keywords": [
"Calibration",
"Cameras",
"Image Segmentation",
"Image Sensors",
"Mobile Robots",
"Object Detection",
"Optical Radar",
"Li DAR Camera Fusion",
"Autonomous Vehicle",
"Light Detection",
"Scenarios Li DAR",
"Traffic Light Signals",
"Camera Images",
"Li DAR Data",
"Proper Calibration",
"Synchronization",
"Point Cloud Data",
"Point Cloud Compression",
"Image Segmentation",
"Laser Radar",
"Operating Systems",
"Robot Vision Systems",
"Object Detection",
"Cameras",
"Li DAR",
"Point Cloud",
"ROS",
"Camera",
"Image"
],
"authors": [
{
"affiliation": "Indian Institute of Technology Hyderabad,Department of Electrical Engineering,Hyderabad,India",
"fullName": "Mrinal Senapati",
"givenName": "Mrinal",
"surname": "Senapati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology Hyderabad,Department of Electrical Engineering,Hyderabad,India",
"fullName": "Bhaskar Anand",
"givenName": "Bhaskar",
"surname": "Anand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology Hyderabad,Department of Electrical Engineering,Hyderabad,India",
"fullName": "Abhishek Thakur",
"givenName": "Abhishek",
"surname": "Thakur",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology Hyderabad,Department of Electrical Engineering,Hyderabad,India",
"fullName": "Harshal Verma",
"givenName": "Harshal",
"surname": "Verma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology Hyderabad,Department of Electrical Engineering,Hyderabad,India",
"fullName": "P. Rajalakshmi",
"givenName": "P.",
"surname": "Rajalakshmi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "irc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "123-124",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3416-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "341600a121",
"articleId": "1ANLNgcx320",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "341600a125",
"articleId": "1ANLMKsRFmM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rtcsa/2016/2479/0/2479a104",
"title": "A Low-Complexity Scheme for Partially Occluded Pedestrian Detection Using LIDAR-RADAR Sensor Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/rtcsa/2016/2479a104/12OmNxisQPE",
"parentPublication": {
"id": "proceedings/rtcsa/2016/2479/0",
"title": "2016 IEEE 22nd International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a394",
"title": "Accurate Calibration of LiDAR-Camera Systems Using Ordinary Boxes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a394/12OmNyTOsk0",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200q6260",
"title": "Perception-Aware Multi-Sensor Fusion for 3D LiDAR Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200q6260/1BmJKbCSk5G",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600b080",
"title": "TransFusion: Robust LiDAR-Camera Fusion for 3D Object Detection with Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600b080/1H0Oy2RzAJi",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600a908",
"title": "Modality-Agnostic Learning for Radar-Lidar Fusion in Vehicle Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600a908/1H1myLTR7Lq",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956145",
"title": "CALNet: LiDAR-Camera Online Calibration With Channel Attention and Liquid Time-Constant Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956145/1IHpG5xhKyA",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eiect/2022/9956/0/995600a410",
"title": "Lidar-Camera Fusion Based on KD-Tree Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/eiect/2022/995600a410/1LHctpPSsRq",
"parentPublication": {
"id": "proceedings/eiect/2022/9956/0",
"title": "2022 2nd International Conference on Electronic Information Engineering and Computer Technology (EIECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c393",
"title": "FuseMODNet: Real-Time Camera and LiDAR Based Moving Object Detection for Robust Low-Light Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c393/1i5mOKRUI48",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscipt/2021/4137/0/413700a419",
"title": "Research on the Fusion Method for Vehicle Shape-position Based on Binocular Camera and Lidar",
"doi": null,
"abstractUrl": "/proceedings-article/iscipt/2021/413700a419/1zzpBWQHWvK",
"parentPublication": {
"id": "proceedings/iscipt/2021/4137/0",
"title": "2021 6th International Symposium on Computer and Information Processing Technology (ISCIPT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscipt/2021/4137/0/413700a388",
"title": "A Space Joint calibration method for lidar and camera on self-driving car and its experimental verification",
"doi": null,
"abstractUrl": "/proceedings-article/iscipt/2021/413700a388/1zzpBbddG6s",
"parentPublication": {
"id": "proceedings/iscipt/2021/4137/0",
"title": "2021 6th International Symposium on Computer and Information Processing Technology (ISCIPT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmFBHEQzG8",
"doi": "10.1109/ICCV48922.2021.00374",
"title": "Multi-Echo LiDAR for 3D Object Detection",
"normalizedTitle": "Multi-Echo LiDAR for 3D Object Detection",
"abstract": "LiDAR sensors can be used to obtain a wide range of measurement signals other than a simple 3D point cloud, and those signals can be leveraged to improve perception tasks like 3D object detection. A single laser pulse can be partially reflected by multiple objects along its path, resulting in multiple measurements called echoes. Multi-echo measurement can provide information about object contours and semi-transparent surfaces which can be used to better identify and locate objects. LiDAR can also measure surface reflectance (intensity of laser pulse return), as well as ambient light of the scene (sunlight reflected by objects). These signals are already available in commercial LiDAR devices but have not been used in most LiDAR-based detection models. We present a 3D object detection model which leverages the full spectrum of measurement signals provided by LiDAR. First, we propose a multi-signal fusion (MSF) module to combine (1) the reflectance and ambient features extracted with a 2D CNN, and (2) point cloud features extracted using a 3D graph neural network (GNN). Second, we propose a multi-echo aggregation (MEA) module to combine the information encoded in different sets of echo points. Compared with traditional single echo point cloud methods, our proposed Multi-Signal LiDAR Detector (MSLiD) extracts richer context information from a wider range of sensing measurements and achieves more accurate 3D object detection. Experiments show that by incorporating the multi-modality of LiDAR, our method outperforms the state-of-the-art by up to relatively 9.1%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "LiDAR sensors can be used to obtain a wide range of measurement signals other than a simple 3D point cloud, and those signals can be leveraged to improve perception tasks like 3D object detection. A single laser pulse can be partially reflected by multiple objects along its path, resulting in multiple measurements called echoes. Multi-echo measurement can provide information about object contours and semi-transparent surfaces which can be used to better identify and locate objects. LiDAR can also measure surface reflectance (intensity of laser pulse return), as well as ambient light of the scene (sunlight reflected by objects). These signals are already available in commercial LiDAR devices but have not been used in most LiDAR-based detection models. We present a 3D object detection model which leverages the full spectrum of measurement signals provided by LiDAR. First, we propose a multi-signal fusion (MSF) module to combine (1) the reflectance and ambient features extracted with a 2D CNN, and (2) point cloud features extracted using a 3D graph neural network (GNN). Second, we propose a multi-echo aggregation (MEA) module to combine the information encoded in different sets of echo points. Compared with traditional single echo point cloud methods, our proposed Multi-Signal LiDAR Detector (MSLiD) extracts richer context information from a wider range of sensing measurements and achieves more accurate 3D object detection. Experiments show that by incorporating the multi-modality of LiDAR, our method outperforms the state-of-the-art by up to relatively 9.1%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "LiDAR sensors can be used to obtain a wide range of measurement signals other than a simple 3D point cloud, and those signals can be leveraged to improve perception tasks like 3D object detection. A single laser pulse can be partially reflected by multiple objects along its path, resulting in multiple measurements called echoes. Multi-echo measurement can provide information about object contours and semi-transparent surfaces which can be used to better identify and locate objects. LiDAR can also measure surface reflectance (intensity of laser pulse return), as well as ambient light of the scene (sunlight reflected by objects). These signals are already available in commercial LiDAR devices but have not been used in most LiDAR-based detection models. We present a 3D object detection model which leverages the full spectrum of measurement signals provided by LiDAR. First, we propose a multi-signal fusion (MSF) module to combine (1) the reflectance and ambient features extracted with a 2D CNN, and (2) point cloud features extracted using a 3D graph neural network (GNN). Second, we propose a multi-echo aggregation (MEA) module to combine the information encoded in different sets of echo points. Compared with traditional single echo point cloud methods, our proposed Multi-Signal LiDAR Detector (MSLiD) extracts richer context information from a wider range of sensing measurements and achieves more accurate 3D object detection. Experiments show that by incorporating the multi-modality of LiDAR, our method outperforms the state-of-the-art by up to relatively 9.1%.",
"fno": "281200d743",
"keywords": [
"Point Cloud Compression",
"Reflectivity",
"Laser Radar",
"Three Dimensional Displays",
"Pulse Measurements",
"Measurement By Laser Beam",
"Object Detection",
"Detection And Localization In 2 D And 3 D",
"Stereo",
"3 D From Multiview And Other Sensors",
"Vision For Robotics And Autonomous Vehicles"
],
"authors": [
{
"affiliation": "Carnegie Mellon University",
"fullName": "Yunze Man",
"givenName": "Yunze",
"surname": "Man",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Xinshuo Weng",
"givenName": "Xinshuo",
"surname": "Weng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DENSO",
"fullName": "Prasanna Kumar Sivakumar",
"givenName": "Prasanna Kumar",
"surname": "Sivakumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Matthew O’Toole",
"givenName": "Matthew",
"surname": "O’Toole",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Kris Kitani",
"givenName": "Kris",
"surname": "Kitani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "3743-3752",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200d732",
"articleId": "1BmKYjC1XcQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200d753",
"articleId": "1BmK2U8rDdm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itng/2015/8828/0/8828a680",
"title": "An Experiment of Mutual Interference between Automotive LIDAR Scanners",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2015/8828a680/12OmNyLA5Ae",
"parentPublication": {
"id": "proceedings/itng/2015/8828/0",
"title": "2015 12th International Conference on Information Technology - New Generations (ITNG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a443",
"title": "Ultra High SNR 3D Imaging Clustered LiDAR Technique for Underwater Targets",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a443/1FUVMV1Uctq",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600q6343",
"title": "LiDAR Snowfall Simulation for Robust 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600q6343/1H0LcEoVh6g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7161",
"title": "DeepFusion: Lidar-Camera Deep Fusion for Multi-Modal 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7161/1H0Nse4n2I8",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i459",
"title": "Point Density-Aware Voxels for LiDAR 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i459/1H1kUoicss8",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7151",
"title": "LIFT: Learning 4D LiDAR Image Fusion Transformer for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7151/1H1m3oPR5HW",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/9.346E244",
"title": "Li3DeTr: A LiDAR based 3D Detection Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/9.346E244/1L8qywkQwXm",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300a857",
"title": "Monocular 3D Object Detection with Pseudo-LiDAR Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300a857/1i5mFr4yElW",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c320",
"title": "Range Adaptation for 3D Object Detection in LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c320/1i5mHJqonHW",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900e708",
"title": "LiDAR-Aug: A General Rendering-based Augmentation Framework for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900e708/1yeLZJcaFnW",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmL0hDh5Uk",
"doi": "10.1109/ICCV48922.2021.01500",
"title": "Fog Simulation on Real LiDAR Point Clouds for 3D Object Detection in Adverse Weather",
"normalizedTitle": "Fog Simulation on Real LiDAR Point Clouds for 3D Object Detection in Adverse Weather",
"abstract": "This work addresses the challenging task of LiDAR-based 3D object detection in foggy weather. Collecting and annotating data in such a scenario is very time, labor and cost intensive. In this paper, we tackle this problem by simulating physically accurate fog into clear-weather scenes, so that the abundant existing real datasets captured in clear weather can be repurposed for our task. Our contributions are twofold: 1) We develop a physically valid fog simulation method that is applicable to any LiDAR dataset. This unleashes the acquisition of large-scale foggy training data at no extra cost. These partially synthetic data can be used to improve the robustness of several perception methods, such as 3D object detection and tracking or simultaneous localization and mapping, on real foggy data. 2) Through extensive experiments with several state-of-the-art detection approaches, we show that our fog simulation can be leveraged to significantly improve the performance for 3D object detection in the presence of fog. Thus, we are the first to provide strong 3D object detection baselines on the Seeing Through Fog dataset. Our code is available at www.trace.ethz.ch/lidar fog simulation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work addresses the challenging task of LiDAR-based 3D object detection in foggy weather. Collecting and annotating data in such a scenario is very time, labor and cost intensive. In this paper, we tackle this problem by simulating physically accurate fog into clear-weather scenes, so that the abundant existing real datasets captured in clear weather can be repurposed for our task. Our contributions are twofold: 1) We develop a physically valid fog simulation method that is applicable to any LiDAR dataset. This unleashes the acquisition of large-scale foggy training data at no extra cost. These partially synthetic data can be used to improve the robustness of several perception methods, such as 3D object detection and tracking or simultaneous localization and mapping, on real foggy data. 2) Through extensive experiments with several state-of-the-art detection approaches, we show that our fog simulation can be leveraged to significantly improve the performance for 3D object detection in the presence of fog. Thus, we are the first to provide strong 3D object detection baselines on the Seeing Through Fog dataset. Our code is available at www.trace.ethz.ch/lidar fog simulation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work addresses the challenging task of LiDAR-based 3D object detection in foggy weather. Collecting and annotating data in such a scenario is very time, labor and cost intensive. In this paper, we tackle this problem by simulating physically accurate fog into clear-weather scenes, so that the abundant existing real datasets captured in clear weather can be repurposed for our task. Our contributions are twofold: 1) We develop a physically valid fog simulation method that is applicable to any LiDAR dataset. This unleashes the acquisition of large-scale foggy training data at no extra cost. These partially synthetic data can be used to improve the robustness of several perception methods, such as 3D object detection and tracking or simultaneous localization and mapping, on real foggy data. 2) Through extensive experiments with several state-of-the-art detection approaches, we show that our fog simulation can be leveraged to significantly improve the performance for 3D object detection in the presence of fog. Thus, we are the first to provide strong 3D object detection baselines on the Seeing Through Fog dataset. Our code is available at www.trace.ethz.ch/lidar_fog_simulation.",
"fno": "281200p5263",
"keywords": [
"Point Cloud Compression",
"Solid Modeling",
"Three Dimensional Displays",
"Laser Radar",
"Costs",
"Object Detection",
"Mathematical Models",
"Vision For Robotics And Autonomous Vehicles",
"Computational Photography",
"Datasets And Evaluation",
"Detection And Localization In 2 D And 3 D"
],
"authors": [
{
"affiliation": "ETH Zürich",
"fullName": "Martin Hahner",
"givenName": "Martin",
"surname": "Hahner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich",
"fullName": "Christos Sakaridis",
"givenName": "Christos",
"surname": "Sakaridis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich",
"fullName": "Dengxin Dai",
"givenName": "Dengxin",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich",
"fullName": "Luc Van Gool",
"givenName": "Luc",
"surname": "Van Gool",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "15263-15272",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200p5253",
"articleId": "1BmGiHy323m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200p5273",
"articleId": "1BmIQQNgI12",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200d133",
"title": "LIGA-Stereo: Learning LiDAR Geometry Aware Representations for Stereo-based 3D Detector",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d133/1BmFAZXbK0g",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d743",
"title": "Multi-Echo LiDAR for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d743/1BmFBHEQzG8",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859689/1G9DVnSiRXO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09858927",
"title": "A Novel Grid-Based Geometry Compression Framework for Spinning Lidar Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09858927/1G9EN6WL3KE",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccri/2022/6800/0/680000a038",
"title": "Adaptive Two-Stage Filter for De-snowing LiDAR Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccri/2022/680000a038/1GlfZJTPjjy",
"parentPublication": {
"id": "proceedings/iccri/2022/6800/0",
"title": "2022 International Conference on Control, Robotics and Informatics (ICCRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600q6343",
"title": "LiDAR Snowfall Simulation for Robust 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600q6343/1H0LcEoVh6g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600a908",
"title": "Modality-Agnostic Learning for Radar-Lidar Fusion in Vehicle Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600a908/1H1myLTR7Lq",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956149",
"title": "Pay \"Attention\" to Adverse Weather: Weather-aware Attention-based Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956149/1IHq3SefEVG",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1679",
"title": "Seeing Through Fog Without Seeing Fog: Deep Multimodal Sensor Fusion in Unseen Adverse Weather",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1679/1m3nuRRfhiE",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a444",
"title": "Robust Multimodal Vehicle Detection in Foggy Weather Using Complementary Lidar and Radar Signals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a444/1yeIILFadLa",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G9DtzCwrjW",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G9DVnSiRXO",
"doi": "10.1109/ICME52920.2022.9859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"normalizedTitle": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"abstract": "In autonomous driving system, High-Definition (HD) map is an important basis for localization, perception and planning tasks. For the construction of HD map, land marking detection is the first step. Recent studies of land marking detection are mainly based on camera image data, while LiDAR-based land marking detection is rarely studied. The main reason is that there are few datasets specially developed for land marking detection. In this paper, a new LiDAR-based land marking dataset named OpenDenseLane is developed for the construction of HD map, and is released to support the academic research. OpenDenseLane contains 1,709 scenarios with 57,227 frames, and each frame includes two types of LiDAR point cloud, camera image and localization data. The LiDAR data in our dataset are dense point clouds to reduce the impact of sparse point distribution. OpenDenseLane provides abundant annotations of ground signs, such as lane line, crosswalk and turn arrow. Experiments are conducted on the proposed dataset and the results of land marking detection and HD map construction are analysed. OpenDenseLane will be released at https://github.com/Thinklab-SJTU/OpenDenseLane.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In autonomous driving system, High-Definition (HD) map is an important basis for localization, perception and planning tasks. For the construction of HD map, land marking detection is the first step. Recent studies of land marking detection are mainly based on camera image data, while LiDAR-based land marking detection is rarely studied. The main reason is that there are few datasets specially developed for land marking detection. In this paper, a new LiDAR-based land marking dataset named OpenDenseLane is developed for the construction of HD map, and is released to support the academic research. OpenDenseLane contains 1,709 scenarios with 57,227 frames, and each frame includes two types of LiDAR point cloud, camera image and localization data. The LiDAR data in our dataset are dense point clouds to reduce the impact of sparse point distribution. OpenDenseLane provides abundant annotations of ground signs, such as lane line, crosswalk and turn arrow. Experiments are conducted on the proposed dataset and the results of land marking detection and HD map construction are analysed. OpenDenseLane will be released at https://github.com/Thinklab-SJTU/OpenDenseLane.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In autonomous driving system, High-Definition (HD) map is an important basis for localization, perception and planning tasks. For the construction of HD map, land marking detection is the first step. Recent studies of land marking detection are mainly based on camera image data, while LiDAR-based land marking detection is rarely studied. The main reason is that there are few datasets specially developed for land marking detection. In this paper, a new LiDAR-based land marking dataset named OpenDenseLane is developed for the construction of HD map, and is released to support the academic research. OpenDenseLane contains 1,709 scenarios with 57,227 frames, and each frame includes two types of LiDAR point cloud, camera image and localization data. The LiDAR data in our dataset are dense point clouds to reduce the impact of sparse point distribution. OpenDenseLane provides abundant annotations of ground signs, such as lane line, crosswalk and turn arrow. Experiments are conducted on the proposed dataset and the results of land marking detection and HD map construction are analysed. OpenDenseLane will be released at https://github.com/Thinklab-SJTU/OpenDenseLane.",
"fno": "09859689",
"keywords": [
"Cameras",
"Optical Radar",
"Remote Sensing By Laser Beam",
"Road Traffic",
"Traffic Engineering Computing",
"HD Map Construction",
"High Definition Map",
"Li DAR Based Land Marking Detection",
"Li DAR Based Land Marking Dataset",
"Opendenselane",
"Li DAR Point Cloud",
"Camera Image",
"Localization Data",
"Li DAR Data",
"Open Dense Lane",
"Dense Lidar Based Dataset",
"Point Cloud Compression",
"Location Awareness",
"Laser Radar",
"Annotations",
"Cameras",
"Trajectory",
"Planning",
"Dataset",
"HD Map",
"Li DAR",
"Land Marking"
],
"authors": [
{
"affiliation": "Shanghai Jiao Tong University,Department of CSE & MoE Key Lab of Artificial Intelligence",
"fullName": "Xiaolei Chen",
"givenName": "Xiaolei",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Department of CSE & MoE Key Lab of Artificial Intelligence",
"fullName": "Wenlong Liao",
"givenName": "Wenlong",
"surname": "Liao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Department of CSE & MoE Key Lab of Artificial Intelligence",
"fullName": "Bin Liu",
"givenName": "Bin",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Department of CSE & MoE Key Lab of Artificial Intelligence",
"fullName": "Junchi Yan",
"givenName": "Junchi",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Anhui Cowarobot Co., Ltd.",
"fullName": "Tao He",
"givenName": "Tao",
"surname": "He",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8563-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09859604",
"articleId": "1G9EneCMGuQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09859718",
"articleId": "1G9EO3uX2fK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034c418",
"title": "Probabilistic Surfel Fusion for Dense LiDAR Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c418/12OmNyQYtiq",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2014/5188/0/06831821",
"title": "Automatic registration of LiDAR and optical imagery using depth map stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2014/06831821/12OmNyeWdBR",
"parentPublication": {
"id": "proceedings/iccp/2014/5188/0",
"title": "2014 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2018/4705/0/470501a073",
"title": "Indoor Map Generation from Multiple LIDAR Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2018/470501a073/12OmNz5apxo",
"parentPublication": {
"id": "proceedings/smartcomp/2018/4705/0",
"title": "2018 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900e449",
"title": "K-Lane: Lidar Lane Dataset and Benchmark for Urban Roads and Highways",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900e449/1G56PwmOVva",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c687",
"title": "Scribble-Supervised LiDAR Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c687/1H1hDMLXody",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10086694",
"title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10086694/1LUpwXZtAe4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150738",
"title": "FusAtNet: Dual Attention based SpectroSpatial Multimodal Fusion Network for Hyperspectral and LiDAR Classification",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150738/1lPHBgUaDqE",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a838",
"title": "Deep LiDAR localization using optical flow sensor-map correspondences",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a838/1qyxjQGkHPW",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2021/8924/0/892400a295",
"title": "Simulated Intensity Rendering of 3D LiDAR using Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2021/892400a295/1rRccCQ6nXa",
"parentPublication": {
"id": "proceedings/bigcomp/2021/8924/0",
"title": "2021 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f958",
"title": "Unsupervised Object Detection with LiDAR Clues",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f958/1yeIYP8po1a",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G9DtzCwrjW",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G9EN6WL3KE",
"doi": "10.1109/ICME52920.2022.9858927",
"title": "A Novel Grid-Based Geometry Compression Framework for Spinning Lidar Point Clouds",
"normalizedTitle": "A Novel Grid-Based Geometry Compression Framework for Spinning Lidar Point Clouds",
"abstract": "Point clouds captured by Light Detection And Ranging (LiDAR) devices have played a significant role in autonomous driving and high-precision mapping. The massive amount of point cloud data, however, challenges the capacity of current data storage and transmission networks, which confines the development of LiDAR point cloud applications. To alleviate this situation, a novel grid-based geometry compression framework dedicated to spinning LiDAR point cloud is proposed in this paper. Firstly, a 2D grid-based point cloud representation is built taking advantage of the LiDAR acquisition pattern. Then, a projection is performed to effectively represent the 3D geometry as multiple 2D geometry components. Finally, dedicated prediction and entropy coding methods are designed for each 2D geometry component according to its characteristics. Experimental results show that the proposed method outperforms MPEG G-PCC with an average gain of 18.81% and 8.03% for lossy and lossless coding respectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Point clouds captured by Light Detection And Ranging (LiDAR) devices have played a significant role in autonomous driving and high-precision mapping. The massive amount of point cloud data, however, challenges the capacity of current data storage and transmission networks, which confines the development of LiDAR point cloud applications. To alleviate this situation, a novel grid-based geometry compression framework dedicated to spinning LiDAR point cloud is proposed in this paper. Firstly, a 2D grid-based point cloud representation is built taking advantage of the LiDAR acquisition pattern. Then, a projection is performed to effectively represent the 3D geometry as multiple 2D geometry components. Finally, dedicated prediction and entropy coding methods are designed for each 2D geometry component according to its characteristics. Experimental results show that the proposed method outperforms MPEG G-PCC with an average gain of 18.81% and 8.03% for lossy and lossless coding respectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Point clouds captured by Light Detection And Ranging (LiDAR) devices have played a significant role in autonomous driving and high-precision mapping. The massive amount of point cloud data, however, challenges the capacity of current data storage and transmission networks, which confines the development of LiDAR point cloud applications. To alleviate this situation, a novel grid-based geometry compression framework dedicated to spinning LiDAR point cloud is proposed in this paper. Firstly, a 2D grid-based point cloud representation is built taking advantage of the LiDAR acquisition pattern. Then, a projection is performed to effectively represent the 3D geometry as multiple 2D geometry components. Finally, dedicated prediction and entropy coding methods are designed for each 2D geometry component according to its characteristics. Experimental results show that the proposed method outperforms MPEG G-PCC with an average gain of 18.81% and 8.03% for lossy and lossless coding respectively.",
"fno": "09858927",
"keywords": [
"Data Compression",
"Entropy Codes",
"Geometric Codes",
"Image Representation",
"Optical Information Processing",
"Optical Radar",
"Video Coding",
"Novel Grid Based Geometry Compression Framework",
"Spinning Li DAR Point Cloud",
"Li DAR Acquisition Pattern",
"Multiple 2 D Geometry Components",
"2 D Geometry Component",
"Light Detection And Ranging Devices",
"High Precision Mapping",
"Point Cloud Data",
"Transmission Networks",
"Data Storage Capacity",
"Li DAR Point Cloud Applications",
"Autonomous Driving",
"2 D Grid Based Point Cloud Representation",
"3 D Geometry",
"Entropy Coding Methods",
"MPEG G PCC",
"Lossless Coding",
"Lossy Coding",
"Point Cloud Compression",
"Geometry",
"Laser Radar",
"Three Dimensional Displays",
"Transform Coding",
"Memory",
"Entropy Coding",
"Li DAR Point Cloud",
"Point Cloud Compression",
"Geometry Compression",
"Grid Based",
"Projection"
],
"authors": [
{
"affiliation": "School of Telecommunications Engineering, Xidian University,China",
"fullName": "Wei Zhang",
"givenName": "Wei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Telecommunications Engineering, Xidian University,China",
"fullName": "Youguang Yu",
"givenName": "Youguang",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Telecommunications Engineering, Xidian University,China",
"fullName": "Fuzheng Yang",
"givenName": "Fuzheng",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8563-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09859678",
"articleId": "1G9EmvlceWI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09860012",
"articleId": "1G9DV2dGcIE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/smartcomp/2018/4705/0/470501a073",
"title": "Indoor Map Generation from Multiple LIDAR Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2018/470501a073/12OmNz5apxo",
"parentPublication": {
"id": "proceedings/smartcomp/2018/4705/0",
"title": "2018 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d133",
"title": "LIGA-Stereo: Learning LiDAR Geometry Aware Representations for Stereo-based 3D Detector",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d133/1BmFAZXbK0g",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d743",
"title": "Multi-Echo LiDAR for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d743/1BmFBHEQzG8",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0/945700b389",
"title": "Simplification and Compression Method Based on Classified Sparse Sampling for LiDAR Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2021/945700b389/1DNDA3nwh0I",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0",
"title": "2021 IEEE 23rd Int Conf on High Performance Computing & Communications; 7th Int Conf on Data Science & Systems; 19th Int Conf on Smart City; 7th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccri/2022/6800/0/680000a038",
"title": "Adaptive Two-Stage Filter for De-snowing LiDAR Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccri/2022/680000a038/1GlfZJTPjjy",
"parentPublication": {
"id": "proceedings/iccri/2022/6800/0",
"title": "2022 International Conference on Control, Robotics and Informatics (ICCRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7191",
"title": "RIDDLE: Lidar Data Compression with Range Image Deep Delta Encoding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7191/1H0Nt9FeR1K",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2022/8611/0/861100a293",
"title": "Poster: Making Edge-assisted LiDAR Perceptions Robust to Lossy Point Cloud Compression",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2022/861100a293/1JC1gRWUecw",
"parentPublication": {
"id": "proceedings/sec/2022/8611/0",
"title": "2022 IEEE/ACM 7th Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2022/8611/0/861100a054",
"title": "FLiCR: A Fast and Lightweight LiDAR Point Cloud Compression Based on Lossy RI",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2022/861100a054/1JC1kBl6Pde",
"parentPublication": {
"id": "proceedings/sec/2022/8611/0",
"title": "2022 IEEE/ACM 7th Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090401",
"title": "Learning to Match 2D Images and 3D LiDAR Point Clouds for Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090401/1jIxmhXvH7a",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09456018",
"title": "An Improved Coarse-To-Fine Motion Estimation Scheme For Lidar Point Cloud Geometry Compression",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09456018/1uCgq6RBNdK",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0N79ME2Wc",
"doi": "10.1109/CVPR52688.2022.00667",
"title": "HSC4D: Human-centered 4D Scene Capture in Large-scale Indoor-outdoor Space Using Wearable IMUs and LiDAR",
"normalizedTitle": "HSC4D: Human-centered 4D Scene Capture in Large-scale Indoor-outdoor Space Using Wearable IMUs and LiDAR",
"abstract": "We propose Human-centered 4D Scene Capture (HSC4D) to accurately and efficiently create a dynamic digital world, containing large-scale indoor-outdoor scenes, diverse human motions, and rich interactions between humans and environments. Using only body-mounted IMUs and LiDAR, HSC4D is space-free without any external devices' constraints and map-free without pre-built maps. Considering that IMUs can capture human poses but always drift for long-period use, while LiDAR is stable for global localization but rough for local positions and orientations, HSC4D makes both sensors complement each other by a joint optimization and achieves promising results for long-term capture. Relationships between humans and environments are also explored to make their interaction more realistic. To facilitate many down-stream tasks, like AR, VR, robots, autonomous driving, etc., we propose a dataset containing three large scenes (1k-5k m<sup>2</sup>) with accurate dynamic human motions and locations. Diverse scenarios (climbing gym, multi-story building, slope, etc.) and challenging human activities (exercising, walking up/down stairs, climbing, etc.) demonstrate the effectiveness and the generalization ability of HSC4D. The dataset and code is available at lidarhumanmotion.net/hsc4d.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose Human-centered 4D Scene Capture (HSC4D) to accurately and efficiently create a dynamic digital world, containing large-scale indoor-outdoor scenes, diverse human motions, and rich interactions between humans and environments. Using only body-mounted IMUs and LiDAR, HSC4D is space-free without any external devices' constraints and map-free without pre-built maps. Considering that IMUs can capture human poses but always drift for long-period use, while LiDAR is stable for global localization but rough for local positions and orientations, HSC4D makes both sensors complement each other by a joint optimization and achieves promising results for long-term capture. Relationships between humans and environments are also explored to make their interaction more realistic. To facilitate many down-stream tasks, like AR, VR, robots, autonomous driving, etc., we propose a dataset containing three large scenes (1k-5k m<sup>2</sup>) with accurate dynamic human motions and locations. Diverse scenarios (climbing gym, multi-story building, slope, etc.) and challenging human activities (exercising, walking up/down stairs, climbing, etc.) demonstrate the effectiveness and the generalization ability of HSC4D. The dataset and code is available at lidarhumanmotion.net/hsc4d.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose Human-centered 4D Scene Capture (HSC4D) to accurately and efficiently create a dynamic digital world, containing large-scale indoor-outdoor scenes, diverse human motions, and rich interactions between humans and environments. Using only body-mounted IMUs and LiDAR, HSC4D is space-free without any external devices' constraints and map-free without pre-built maps. Considering that IMUs can capture human poses but always drift for long-period use, while LiDAR is stable for global localization but rough for local positions and orientations, HSC4D makes both sensors complement each other by a joint optimization and achieves promising results for long-term capture. Relationships between humans and environments are also explored to make their interaction more realistic. To facilitate many down-stream tasks, like AR, VR, robots, autonomous driving, etc., we propose a dataset containing three large scenes (1k-5k m2) with accurate dynamic human motions and locations. Diverse scenarios (climbing gym, multi-story building, slope, etc.) and challenging human activities (exercising, walking up/down stairs, climbing, etc.) demonstrate the effectiveness and the generalization ability of HSC4D. The dataset and code is available at lidarhumanmotion.net/hsc4d.",
"fno": "694600g782",
"keywords": [
"Gait Analysis",
"Image Motion Analysis",
"Optical Radar",
"Pose Estimation",
"Wearable Sensors",
"Accurate Dynamic Human Motions",
"Large Scale Indoor Outdoor Space",
"Li DAR",
"Large Scale Indoor Outdoor Scenes",
"Diverse Human Motions",
"Human Poses",
"Long Term Capture",
"HSC 4 D",
"Human Centered 4 D Scene Capture",
"Wearable IMU",
"Climbing Gym",
"Multistory Building",
"Body Mounted IMU",
"Location Awareness",
"Laser Radar",
"Dynamics",
"Stairs",
"Robot Sensing Systems",
"Motion Capture",
"Sensors"
],
"authors": [
{
"affiliation": "Xiamen University,China",
"fullName": "Yudi Dai",
"givenName": "Yudi",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,China",
"fullName": "Yitai Lin",
"givenName": "Yitai",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,China",
"fullName": "Chenglu Wen",
"givenName": "Chenglu",
"surname": "Wen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,China",
"fullName": "Siqi Shen",
"givenName": "Siqi",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,China",
"fullName": "Lan Xu",
"givenName": "Lan",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,China",
"fullName": "Jingyi Yu",
"givenName": "Jingyi",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,China",
"fullName": "Yuexin Ma",
"givenName": "Yuexin",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University,China",
"fullName": "Cheng Wang",
"givenName": "Cheng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "6782-6792",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0N74ZaqCA",
"name": "pcvpr202269460-09880196s1-mm_694600g782.zip",
"size": "17.7 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880196s1-mm_694600g782.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600g771",
"articleId": "1H1hx09yd2g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600g793",
"articleId": "1H1hHBNAclG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itng/2015/8828/0/8828a680",
"title": "An Experiment of Mutual Interference between Automotive LIDAR Scanners",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2015/8828a680/12OmNyLA5Ae",
"parentPublication": {
"id": "proceedings/itng/2015/8828/0",
"title": "2015 12th International Conference on Information Technology - New Generations (ITNG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859689/1G9DVnSiRXO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0470",
"title": "LiDARCap: Long-range Markerless 3D Human Motion Capture with LiDAR Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0470/1H0OseLpTYQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7151",
"title": "LIFT: Learning 4D LiDAR Image Fusion Transformer for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7151/1H1m3oPR5HW",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2022/9109/0/910900a257",
"title": "Kalman Filtering Jitter Cancellation Based on Lidar Localization",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2022/910900a257/1HYv72k2aYw",
"parentPublication": {
"id": "proceedings/iccnea/2022/9109/0",
"title": "2022 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049734",
"title": "LiDAR-aid Inertial Poser: Large-scale Human Motion Capture by Sparse Inertial and LiDAR Sensors",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049734/1KYop88r9Je",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2022/1041/0/10051809",
"title": "Realization of Laser Object Vaporization Locating Based on Low-Cost 2D LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2022/10051809/1LiNTpX4Zqg",
"parentPublication": {
"id": "proceedings/snpd/2022/1041/0",
"title": "2022 IEEE/ACIS 23rd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413014",
"title": "Human Segmentation with Dynamic LiDAR Data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413014/1tmirz753q0",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f958",
"title": "Unsupervised Object Detection with LiDAR Clues",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f958/1yeIYP8po1a",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f523",
"title": "4D Panoptic LiDAR Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f523/1yeLogZvWrS",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0OseLpTYQ",
"doi": "10.1109/CVPR52688.2022.01985",
"title": "LiDARCap: Long-range Markerless 3D Human Motion Capture with LiDAR Point Clouds",
"normalizedTitle": "LiDARCap: Long-range Markerless 3D Human Motion Capture with LiDAR Point Clouds",
"abstract": "Existing motion capture datasets are largely short-range and cannot yet fit the need of long-range applications. We propose LiDARHuman26M, a new human motion capture dataset captured by LiDAR at a much longer range to overcome this limitation. Our dataset also includes the ground truth human motions acquired by the IMU system and the synchronous RGB images. We further present a strong base-line method, LiDARCap, for LiDAR point cloud human motion capture. Specifically, we first utilize <tex>$PointNet++$</tex> to encode features of points and then employ the inverse kinematics solver and SMPL optimizer to regress the pose through aggregating the temporally encoded features hierarchically. Quantitative and qualitative experiments show that our method outperforms the techniques based only on RGB images. Ablation experiments demonstrate that our dataset is challenging and worthy of further research. Finally, the experiments on the KITTI Dataset and the Waymo Open Dataset show that our method can be generalized to different LiDAR sensor settings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing motion capture datasets are largely short-range and cannot yet fit the need of long-range applications. We propose LiDARHuman26M, a new human motion capture dataset captured by LiDAR at a much longer range to overcome this limitation. Our dataset also includes the ground truth human motions acquired by the IMU system and the synchronous RGB images. We further present a strong base-line method, LiDARCap, for LiDAR point cloud human motion capture. Specifically, we first utilize <tex>$PointNet++$</tex> to encode features of points and then employ the inverse kinematics solver and SMPL optimizer to regress the pose through aggregating the temporally encoded features hierarchically. Quantitative and qualitative experiments show that our method outperforms the techniques based only on RGB images. Ablation experiments demonstrate that our dataset is challenging and worthy of further research. Finally, the experiments on the KITTI Dataset and the Waymo Open Dataset show that our method can be generalized to different LiDAR sensor settings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing motion capture datasets are largely short-range and cannot yet fit the need of long-range applications. We propose LiDARHuman26M, a new human motion capture dataset captured by LiDAR at a much longer range to overcome this limitation. Our dataset also includes the ground truth human motions acquired by the IMU system and the synchronous RGB images. We further present a strong base-line method, LiDARCap, for LiDAR point cloud human motion capture. Specifically, we first utilize - to encode features of points and then employ the inverse kinematics solver and SMPL optimizer to regress the pose through aggregating the temporally encoded features hierarchically. Quantitative and qualitative experiments show that our method outperforms the techniques based only on RGB images. Ablation experiments demonstrate that our dataset is challenging and worthy of further research. Finally, the experiments on the KITTI Dataset and the Waymo Open Dataset show that our method can be generalized to different LiDAR sensor settings.",
"fno": "694600u0470",
"keywords": [
"Image Colour Analysis",
"Image Motion Analysis",
"Optical Radar",
"Pose Estimation",
"Li DAR Cap",
"3 D Human Motion Capture",
"Li DAR Point Clouds",
"Motion Capture Datasets",
"Long Range Applications",
"Human Motion Capture Dataset",
"Longer Range",
"Ground Truth Human Motions",
"Synchronous RGB Images",
"Strong Base Line Method",
"Li DAR Point Cloud Human Motion Capture",
"Temporally Encoded Features",
"KITTI Dataset",
"Waymo Open Dataset Show",
"Different Li DAR Sensor Settings",
"Point Cloud Compression",
"Computer Vision",
"Laser Radar",
"Three Dimensional Displays",
"Motion Capture",
"Pattern Recognition"
],
"authors": [
{
"affiliation": "Xiamen University,Fujian Key Laboratory of Sensing and Computing for Smart Cities",
"fullName": "Jialian Li",
"givenName": "Jialian",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,Fujian Key Laboratory of Sensing and Computing for Smart Cities",
"fullName": "Jingyi Zhang",
"givenName": "Jingyi",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,Fujian Key Laboratory of Sensing and Computing for Smart Cities",
"fullName": "Zhiyong Wang",
"givenName": "Zhiyong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,Fujian Key Laboratory of Sensing and Computing for Smart Cities",
"fullName": "Siqi Shen",
"givenName": "Siqi",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,Fujian Key Laboratory of Sensing and Computing for Smart Cities",
"fullName": "Chenglu Wen",
"givenName": "Chenglu",
"surname": "Wen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University,Shanghai Engineering Research Center of Intelligent Vision and Imaging",
"fullName": "Yuexin Ma",
"givenName": "Yuexin",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University,Shanghai Engineering Research Center of Intelligent Vision and Imaging",
"fullName": "Lan Xu",
"givenName": "Lan",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University,Shanghai Engineering Research Center of Intelligent Vision and Imaging",
"fullName": "Jingyi Yu",
"givenName": "Jingyi",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xiamen University,Fujian Key Laboratory of Sensing and Computing for Smart Cities",
"fullName": "Cheng Wang",
"givenName": "Cheng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "20470-20480",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0OsapbM9q",
"name": "pcvpr202269460-09880366s1-mm_694600u0470.zip",
"size": "13.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880366s1-mm_694600u0470.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600u0460",
"articleId": "1H0OEv2zO8w",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600u0481",
"articleId": "1H1n3sTx7A4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2022/0915/0/091500b716",
"title": "Biomass Prediction with 3D Point Clouds from LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500b716/1B13jVCzyNO",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5263",
"title": "Fog Simulation on Real LiDAR Point Clouds for 3D Object Detection in Adverse Weather",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5263/1BmL0hDh5Uk",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900e418",
"title": "PointMotionNet: Point-Wise Motion Learning for Large-Scale LiDAR Point Clouds Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900e418/1G563yaYq1q",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859689/1G9DVnSiRXO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09893384",
"title": "Efficient 3D Deep LiDAR Odometry",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09893384/1GGLsotG8IU",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccri/2022/6800/0/680000a038",
"title": "Adaptive Two-Stage Filter for De-snowing LiDAR Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccri/2022/680000a038/1GlfZJTPjjy",
"parentPublication": {
"id": "proceedings/iccri/2022/6800/0",
"title": "2022 International Conference on Control, Robotics and Informatics (ICCRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g782",
"title": "HSC4D: Human-centered 4D Scene Capture in Large-scale Indoor-outdoor Space Using Wearable IMUs and LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g782/1H0N79ME2Wc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c687",
"title": "Scribble-Supervised LiDAR Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c687/1H1hDMLXody",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049734",
"title": "LiDAR-aid Inertial Poser: Large-scale Human Motion Capture by Sparse Inertial and LiDAR Sensors",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049734/1KYop88r9Je",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eiect/2022/9956/0/995600a410",
"title": "Lidar-Camera Fusion Based on KD-Tree Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/eiect/2022/995600a410/1LHctpPSsRq",
"parentPublication": {
"id": "proceedings/eiect/2022/9956/0",
"title": "2022 2nd International Conference on Electronic Information Engineering and Computer Technology (EIECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1hDMLXody",
"doi": "10.1109/CVPR52688.2022.00272",
"title": "Scribble-Supervised LiDAR Semantic Segmentation",
"normalizedTitle": "Scribble-Supervised LiDAR Semantic Segmentation",
"abstract": "Densely annotating LiDAR point clouds remains too expensive and time-consuming to keep up with the ever growing volume of data. While current literature focuses on fully-supervised performance, developing efficient methods that take advantage of realistic weak supervision have yet to be explored. In this paper, we propose using scribbles to annotate LiDAR point clouds and release ScribbleKITTI, the first scribble-annotated dataset for LiDAR semantic segmentation. Furthermore, we present a pipeline to reduce the performance gap that arises when using such weak annotations. Our pipeline comprises of three stand-alone contributions that can be combined with any LiDAR semantic segmentation model to achieve up to 95.7% of the fully-supervised performance while using only 8% labeled points. Our scribble annotations and code are available at github.com/ouenal/scribblekitti.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Densely annotating LiDAR point clouds remains too expensive and time-consuming to keep up with the ever growing volume of data. While current literature focuses on fully-supervised performance, developing efficient methods that take advantage of realistic weak supervision have yet to be explored. In this paper, we propose using scribbles to annotate LiDAR point clouds and release ScribbleKITTI, the first scribble-annotated dataset for LiDAR semantic segmentation. Furthermore, we present a pipeline to reduce the performance gap that arises when using such weak annotations. Our pipeline comprises of three stand-alone contributions that can be combined with any LiDAR semantic segmentation model to achieve up to 95.7% of the fully-supervised performance while using only 8% labeled points. Our scribble annotations and code are available at github.com/ouenal/scribblekitti.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Densely annotating LiDAR point clouds remains too expensive and time-consuming to keep up with the ever growing volume of data. While current literature focuses on fully-supervised performance, developing efficient methods that take advantage of realistic weak supervision have yet to be explored. In this paper, we propose using scribbles to annotate LiDAR point clouds and release ScribbleKITTI, the first scribble-annotated dataset for LiDAR semantic segmentation. Furthermore, we present a pipeline to reduce the performance gap that arises when using such weak annotations. Our pipeline comprises of three stand-alone contributions that can be combined with any LiDAR semantic segmentation model to achieve up to 95.7% of the fully-supervised performance while using only 8% labeled points. Our scribble annotations and code are available at github.com/ouenal/scribblekitti.",
"fno": "694600c687",
"keywords": [
"Feature Extraction",
"Image Segmentation",
"Learning Artificial Intelligence",
"Optical Radar",
"Scribble Supervised Li DAR Semantic Segmentation",
"Densely Annotating Li DAR Point Clouds",
"Fully Supervised Performance",
"Realistic Weak Supervision",
"Scribbles",
"Scribble Annotated Dataset",
"Pipeline",
"Performance Gap",
"Weak Annotations",
"Li DAR Semantic Segmentation Model",
"8 Labeled Points",
"Scribble Annotations",
"Point Cloud Compression",
"Training",
"Computer Vision",
"Laser Radar",
"Codes",
"Annotations",
"Computational Modeling",
"Segmentation",
"Grouping And Shape Analysis Navigation And Autonomous Driving Self Semi Meta Unsupervised Learning"
],
"authors": [
{
"affiliation": "ETH Zurich",
"fullName": "Ozan Unal",
"givenName": "Ozan",
"surname": "Unal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich",
"fullName": "Dengxin Dai",
"givenName": "Dengxin",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich",
"fullName": "Luc Van Gool",
"givenName": "Luc",
"surname": "Van Gool",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2687-2697",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1hDJlV23m",
"name": "pcvpr202269460-09879596s1-mm_694600c687.zip",
"size": "1.52 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879596s1-mm_694600c687.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600c676",
"articleId": "1H1jhFf87U4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600c698",
"articleId": "1H0N5HlpVtK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851d159",
"title": "ScribbleSup: Scribble-Supervised Convolutional Networks for Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d159/12OmNy49sIV",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3106",
"title": "SLIM: Self-Supervised LiDAR Scene Flow and Motion Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3106/1BmIjnW2Eow",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200q6260",
"title": "Perception-Aware Multi-Sensor Fusion for 3D LiDAR Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200q6260/1BmJKbCSk5G",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200h396",
"title": "Scribble-Supervised Semantic Segmentation by Uncertainty Reduction on Neural Representation and Self-Supervision on Neural Eigenspace",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200h396/1BmLmd75nZS",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900e449",
"title": "K-Lane: Lidar Lane Dataset and Benchmark for Urban Roads and Highways",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900e449/1G56PwmOVva",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859689/1G9DVnSiRXO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859693",
"title": "Cenet: Toward Concise and Efficient Lidar Semantic Segmentation for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859693/1G9Ek0QiHvy",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600j881",
"title": "Image-to-Lidar Self-Supervised Distillation for Autonomous Driving Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600j881/1H0KyVm6rJu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2023/2056/0/205600a350",
"title": "Masked Autoencoder for Self-Supervised Pre-training on Lidar Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2023/205600a350/1Kzz7RCHLeU",
"parentPublication": {
"id": "proceedings/wacvw/2023/2056/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b644",
"title": "Unsupervised 4D LiDAR Moving Object Segmentation in Stationary Settings with Multivariate Occupancy Time Series",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b644/1L8qrCqPxPq",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1HYv01GKQDK",
"title": "2022 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"acronym": "iccnea",
"groupId": "1823164",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1HYv72k2aYw",
"doi": "10.1109/ICCNEA57056.2022.00064",
"title": "Kalman Filtering Jitter Cancellation Based on Lidar Localization",
"normalizedTitle": "Kalman Filtering Jitter Cancellation Based on Lidar Localization",
"abstract": "Positioning using lidar is different from GPS, which directly obtains must coordinates of the earth’s surface, and from IMU and odometers, which obtain position data through integration. It involves mapping an area before using sensor data such as lidar, cameras and IMU. In this paper, the study of lidar and camera, odometer as sensors is done by introducing the calibration of sensors, which are used in various devices in recent times and the data obtained are processed by using kalman filtering. By comparison, it is found that the data processing by kalman filtering makes the data smoother and the prediction more accurate. This enhances the application in the data processing process. It is also found through analysis that the use of kalman filtering is more productive in the process of using kalman filtering due to the loosely coupled relationship between various sensors, thus providing a new way of thinking for research in this field.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Positioning using lidar is different from GPS, which directly obtains must coordinates of the earth’s surface, and from IMU and odometers, which obtain position data through integration. It involves mapping an area before using sensor data such as lidar, cameras and IMU. In this paper, the study of lidar and camera, odometer as sensors is done by introducing the calibration of sensors, which are used in various devices in recent times and the data obtained are processed by using kalman filtering. By comparison, it is found that the data processing by kalman filtering makes the data smoother and the prediction more accurate. This enhances the application in the data processing process. It is also found through analysis that the use of kalman filtering is more productive in the process of using kalman filtering due to the loosely coupled relationship between various sensors, thus providing a new way of thinking for research in this field.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Positioning using lidar is different from GPS, which directly obtains must coordinates of the earth’s surface, and from IMU and odometers, which obtain position data through integration. It involves mapping an area before using sensor data such as lidar, cameras and IMU. In this paper, the study of lidar and camera, odometer as sensors is done by introducing the calibration of sensors, which are used in various devices in recent times and the data obtained are processed by using kalman filtering. By comparison, it is found that the data processing by kalman filtering makes the data smoother and the prediction more accurate. This enhances the application in the data processing process. It is also found through analysis that the use of kalman filtering is more productive in the process of using kalman filtering due to the loosely coupled relationship between various sensors, thus providing a new way of thinking for research in this field.",
"fno": "910900a257",
"keywords": [
"Calibration",
"Distance Measurement",
"Global Positioning System",
"Jitter",
"Kalman Filters",
"Optical Radar",
"Data Smoother",
"Data Processing Process",
"Kalman Filtering Jitter Cancellation",
"Lidar Localization",
"IMU",
"Odometer",
"Position Data",
"Sensor Data",
"Location Awareness",
"Laser Radar",
"Filtering",
"Jitter",
"Data Processing",
"Cameras",
"Computer Networks",
"Oral Lidar",
"Positioning",
"Kalman",
"Sensors"
],
"authors": [
{
"affiliation": "Institute of Artificial Intelligence and Data Science Xi’an Technological University,Xi’an,China,710021",
"fullName": "Lei Li",
"givenName": "Lei",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Artificial Intelligence and Data Science Xi’an Technological University,Xi’an,China,710021",
"fullName": "Jianguo Wang",
"givenName": "Jianguo",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccnea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-09-01T00:00:00",
"pubType": "proceedings",
"pages": "257-260",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "910900a252",
"articleId": "1HYv2rAWICc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "910900a261",
"articleId": "1HYv6JK9X8I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ivs/2005/8961/0/01505126",
"title": "Corridor navigation with a LiDAR/INS Kalman filter solution",
"doi": null,
"abstractUrl": "/proceedings-article/ivs/2005/01505126/12OmNy6HQTm",
"parentPublication": {
"id": "proceedings/ivs/2005/8961/0",
"title": "2005 IEEE Intelligent Vehicles Symposium Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859689/1G9DVnSiRXO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g782",
"title": "HSC4D: Human-centered 4D Scene Capture in Large-scale Indoor-outdoor Space Using Wearable IMUs and LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g782/1H0N79ME2Wc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2022/1041/0/10051809",
"title": "Realization of Laser Object Vaporization Locating Based on Low-Cost 2D LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2022/10051809/1LiNTpX4Zqg",
"parentPublication": {
"id": "proceedings/snpd/2022/1041/0",
"title": "2022 IEEE/ACIS 23rd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2019/03/08721124",
"title": "Automatic Vehicle Tracking With Roadside LiDAR Data for the Connected-Vehicles System",
"doi": null,
"abstractUrl": "/magazine/ex/2019/03/08721124/1bLyse6jMSA",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2021/8924/0/892400a302",
"title": "Solid-State LiDAR based-SLAM: A Concise Review and Application",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2021/892400a302/1rRcds1ib2o",
"parentPublication": {
"id": "proceedings/bigcomp/2021/8924/0",
"title": "2021 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2021/3596/0/359600a296",
"title": "Background Filtering and Object Detection with Roadside LiDAR Data",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2021/359600a296/1tMPPC3xhp6",
"parentPublication": {
"id": "proceedings/icedme/2021/3596/0",
"title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412843",
"title": "Loop-closure detection by LiDAR scan re-identification",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412843/1tmjVAtRW6Y",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2021/1596/0/159600a360",
"title": "Research on mapping method based on data fusion of lidar and depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2021/159600a360/1wcdxnfEc5a",
"parentPublication": {
"id": "proceedings/aemcse/2021/1596/0",
"title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f958",
"title": "Unsupervised Object Detection with LiDAR Clues",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f958/1yeIYP8po1a",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1LkfvODkBaM",
"title": "2022 4th International Conference on Applied Machine Learning (ICAML)",
"acronym": "icaml",
"groupId": "10056426",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1Lkfwvwbez6",
"doi": "10.1109/ICAML57167.2022.00052",
"title": "LiDAR Odometer Based on Visual Enhancement",
"normalizedTitle": "LiDAR Odometer Based on Visual Enhancement",
"abstract": "A single LiDAR odometer is prone to location failure in similar scenarios using only geometric information, where its location accuracy is relatively low. A novel LiDAR odometer based on visual enhancement is proposed in this paper to solve those problems. Firstly, the point cloud from LiDAR is attached with visual information through the fusion of the image and the point cloud. Then the colour continuum surface is constructed to transform the discrete colour information of the point cloud into the continuous colour information, from which photometric constraint is built. Finally, the odometer is established by photometric constraint and geometric constraint. Experiments on the KITTI data set show that the proposed odometer is more accurate and robust than other single LiDAR odometers such as ICP-SVD, ICP-GN, NDT and LOAM under the same conditions, which verifies the progressive nature of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A single LiDAR odometer is prone to location failure in similar scenarios using only geometric information, where its location accuracy is relatively low. A novel LiDAR odometer based on visual enhancement is proposed in this paper to solve those problems. Firstly, the point cloud from LiDAR is attached with visual information through the fusion of the image and the point cloud. Then the colour continuum surface is constructed to transform the discrete colour information of the point cloud into the continuous colour information, from which photometric constraint is built. Finally, the odometer is established by photometric constraint and geometric constraint. Experiments on the KITTI data set show that the proposed odometer is more accurate and robust than other single LiDAR odometers such as ICP-SVD, ICP-GN, NDT and LOAM under the same conditions, which verifies the progressive nature of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A single LiDAR odometer is prone to location failure in similar scenarios using only geometric information, where its location accuracy is relatively low. A novel LiDAR odometer based on visual enhancement is proposed in this paper to solve those problems. Firstly, the point cloud from LiDAR is attached with visual information through the fusion of the image and the point cloud. Then the colour continuum surface is constructed to transform the discrete colour information of the point cloud into the continuous colour information, from which photometric constraint is built. Finally, the odometer is established by photometric constraint and geometric constraint. Experiments on the KITTI data set show that the proposed odometer is more accurate and robust than other single LiDAR odometers such as ICP-SVD, ICP-GN, NDT and LOAM under the same conditions, which verifies the progressive nature of the proposed method.",
"fno": "626500a232",
"keywords": [
"Distance Measurement",
"Image Colour Analysis",
"Object Detection",
"Optical Radar",
"Singular Value Decomposition",
"Colour Continuum Surface",
"Continuous Colour Information",
"Discrete Colour Information",
"Geometric Constraint",
"Geometric Information",
"Location Accuracy",
"Location Failure",
"Novel Li DAR Odometer",
"Photometric Constraint",
"Point Cloud",
"Similar Scenarios",
"Single Li DAR Odometer",
"Visual Enhancement",
"Visual Information",
"Point Cloud Compression",
"Visualization",
"Laser Radar",
"Image Color Analysis",
"Transforms",
"Machine Learning",
"Li DAR Odometer",
"Visual Enhancement",
"Sensor Fusion"
],
"authors": [
{
"affiliation": "Shenzhen International Graduate School Tsinghua University,Beijing,China",
"fullName": "Zuoxian Liang",
"givenName": "Zuoxian",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen International Graduate School Tsinghua University,Beijing,China",
"fullName": "Kai Zhang",
"givenName": "Kai",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Machine Patrol Management Center Guangdong Power Grid Corporation,Guangzhou,China",
"fullName": "Yungen Liu",
"givenName": "Yungen",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icaml",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "232-236",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6265-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "626500a228",
"articleId": "1LkfCSvRwJi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "626500a237",
"articleId": "1LkfEjWmiru",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/citce/2021/2184/0/218400a001",
"title": "Improved Iterative Closest Point (ICP) Point Cloud Registration Algorithm based on Matching Point Pair Quadratic Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/citce/2021/218400a001/1BtfTK26ZHO",
"parentPublication": {
"id": "proceedings/citce/2021/2184/0",
"title": "2021 International Conference on Computer, Internet of Things and Control Engineering (CITCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2021/2471/0/09762188",
"title": "Attention Focused Generative Network for Reducing Self-Occlusions in Aerial LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2021/09762188/1CT9ciui4CI",
"parentPublication": {
"id": "proceedings/aipr/2021/2471/0",
"title": "2021 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859689/1G9DVnSiRXO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a366",
"title": "ADAPTIVE ACQUISITION OF AIRBORNE LIDAR POINT CLOUD BASED ON DEEP REINFORCEMENT LEARNING",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a366/1GvdduhsaUo",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c687",
"title": "Scribble-Supervised LiDAR Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c687/1H1hDMLXody",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2022/9109/0/910900a257",
"title": "Kalman Filtering Jitter Cancellation Based on Lidar Localization",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2022/910900a257/1HYv72k2aYw",
"parentPublication": {
"id": "proceedings/iccnea/2022/9109/0",
"title": "2022 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991525",
"title": "An Adaptive Overlap-based ICP Algorithm for Multi-LiDAR Calibration in Low-overlap Situations",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991525/1JwPU729UaY",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eiect/2022/9956/0/995600a410",
"title": "Lidar-Camera Fusion Based on KD-Tree Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/eiect/2022/995600a410/1LHctpPSsRq",
"parentPublication": {
"id": "proceedings/eiect/2022/9956/0",
"title": "2022 2nd International Conference on Electronic Information Engineering and Computer Technology (EIECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10086694",
"title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10086694/1LUpwXZtAe4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwecai/2020/8149/0/814900a032",
"title": "Positioning System Based on Lidar Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/iwecai/2020/814900a032/1nTurytQjoQ",
"parentPublication": {
"id": "proceedings/iwecai/2020/8149/0",
"title": "2020 International Workshop on Electronic Communication and Artificial Intelligence (IWECAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrMHOd6",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvDqsPL",
"doi": "10.1109/HICSS.2016.15",
"title": "Introducing Avatarification: An Experimental Examination of How Avatars Influence Student Motivation",
"normalizedTitle": "Introducing Avatarification: An Experimental Examination of How Avatars Influence Student Motivation",
"abstract": "While gamification has been studied, applied, and sometimes contested within a variety of contexts (especially education and business), the concept of avatarification -- the utilization of virtual self-representations within a mediated environment -- is relatively new and has great potential for enhancing learning contexts. Building on previous work which suggests that people behave consistently with their avatars' characteristics, the present research aims to develop an understanding of how avatars can be integrated into student communication in ways that increase performance motivation. In a field experiment conducted with an undergraduate class, 229 participants used avatars to communicate over a 15-week period about class material. Results suggest that using an ideal-self avatar or superhero-student avatar augmented student performance motivation during the avatar-use task, but the superhero-student avatar unexpectedly hindered performance motivation in a task unrelated to avatar use. This suggests novel theoretical and practical implications for avatar use in education.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While gamification has been studied, applied, and sometimes contested within a variety of contexts (especially education and business), the concept of avatarification -- the utilization of virtual self-representations within a mediated environment -- is relatively new and has great potential for enhancing learning contexts. Building on previous work which suggests that people behave consistently with their avatars' characteristics, the present research aims to develop an understanding of how avatars can be integrated into student communication in ways that increase performance motivation. In a field experiment conducted with an undergraduate class, 229 participants used avatars to communicate over a 15-week period about class material. Results suggest that using an ideal-self avatar or superhero-student avatar augmented student performance motivation during the avatar-use task, but the superhero-student avatar unexpectedly hindered performance motivation in a task unrelated to avatar use. This suggests novel theoretical and practical implications for avatar use in education.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While gamification has been studied, applied, and sometimes contested within a variety of contexts (especially education and business), the concept of avatarification -- the utilization of virtual self-representations within a mediated environment -- is relatively new and has great potential for enhancing learning contexts. Building on previous work which suggests that people behave consistently with their avatars' characteristics, the present research aims to develop an understanding of how avatars can be integrated into student communication in ways that increase performance motivation. In a field experiment conducted with an undergraduate class, 229 participants used avatars to communicate over a 15-week period about class material. Results suggest that using an ideal-self avatar or superhero-student avatar augmented student performance motivation during the avatar-use task, but the superhero-student avatar unexpectedly hindered performance motivation in a task unrelated to avatar use. This suggests novel theoretical and practical implications for avatar use in education.",
"fno": "5670a051",
"keywords": [
"Avatars",
"Context",
"Games",
"Education",
"Visualization",
"Psychology",
"Media",
"Motivation",
"Avatars",
"Education",
"Avatarification",
"Students",
"Performance"
],
"authors": [
{
"affiliation": null,
"fullName": "Rabindra Ratan",
"givenName": "Rabindra",
"surname": "Ratan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "RV Rikard",
"givenName": "RV",
"surname": "Rikard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Celina Wanek",
"givenName": "Celina",
"surname": "Wanek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Madison McKinley",
"givenName": "Madison",
"surname": "McKinley",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lee Johnson",
"givenName": "Lee",
"surname": "Johnson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Young June Sah",
"givenName": "Young June",
"surname": "Sah",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2016-01-01T00:00:00",
"pubType": "proceedings",
"pages": "51-59",
"year": "2016",
"issn": "1530-1605",
"isbn": "978-0-7695-5670-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5670a041",
"articleId": "12OmNxcdG0Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5670a060",
"articleId": "12OmNC3Xhpl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vs-games/2009/3588/0/3588a005",
"title": "A Model of Motivation Based on Empathy for AI-Driven Avatars in Virtual Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2009/3588a005/12OmNB06l7H",
"parentPublication": {
"id": "proceedings/vs-games/2009/3588/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2009/3893/0/3893a152",
"title": "Reusable, Interactive, Multilingual Online Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2009/3893a152/12OmNBp52Ag",
"parentPublication": {
"id": "proceedings/cvmp/2009/3893/0",
"title": "2009 Conference for Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2011/468/0/06142700",
"title": "Does an avatar motivate?",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2011/06142700/12OmNCcKQGG",
"parentPublication": {
"id": "proceedings/fie/2011/468/0",
"title": "2011 Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a190",
"title": "Humans as Avatars in Smart and Playable Cities",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a190/12OmNwoPtjU",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a308",
"title": "Empirically Evaluating the Effects of Eye Height and Self-Avatars on Dynamic Passability Affordances in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a308/1MNgWLowz1m",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798122",
"title": "An Initial Investigation into Stereotypical Influences on Implicit Racial Bias and Embodied Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798122/1cJ0MR4xjWg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797907",
"title": "Digital Demons: Psychological Effects of Creating, and Engaging with, Virtual Avatars Representing Undesirable Aspects of the Self",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797907/1cJ1eAkUYNO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a050",
"title": "The Effects of Virtual Avatar Visibility on Pointing Interpretation by Observers in 3D Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a050/1yeDa4aaGY0",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2021/3851/0/09637199",
"title": "Exploring the Impact of Non-conventional Gamification Elements on Student Motivation and Engagement",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2021/09637199/1zuvZHNGH0Q",
"parentPublication": {
"id": "proceedings/fie/2021/3851/0",
"title": "2021 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WtSuCzdu",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00189",
"title": "Investigating the Relation Between Gender Expression of Mixed Reality Avatars and Sexuality of Male Users",
"normalizedTitle": "Investigating the Relation Between Gender Expression of Mixed Reality Avatars and Sexuality of Male Users",
"abstract": "Mixed reality environments are the next step in remote collaboration, yet there has been little attention towards avatar design for the en-joyability and efficiency of diverse users, particularly for the lesbian, gay, bisexual, transgender and queer (LGBTQ+) community. We believe that it is important to adapt the design of virtual avatars users collaborate with depending on not only the users gender but also their sexuality. In this study, we focus on examining the relationship between the sexual orientation of male users with the gender expression of virtual avatars, while they perform a collaborative task in mixed reality. We analyzed the perception of straight cisgender men and gay cisgender men in relation to the gender expression of the above mentioned agents and find differences that might affect design decisions for creating user-base specific avatars. We found that Female avatars tend to have a more positive perception when it comes to gay men. And for mixed reality, gay men also tend to have both negative and positive experiences more intensely than straight men.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mixed reality environments are the next step in remote collaboration, yet there has been little attention towards avatar design for the en-joyability and efficiency of diverse users, particularly for the lesbian, gay, bisexual, transgender and queer (LGBTQ+) community. We believe that it is important to adapt the design of virtual avatars users collaborate with depending on not only the users gender but also their sexuality. In this study, we focus on examining the relationship between the sexual orientation of male users with the gender expression of virtual avatars, while they perform a collaborative task in mixed reality. We analyzed the perception of straight cisgender men and gay cisgender men in relation to the gender expression of the above mentioned agents and find differences that might affect design decisions for creating user-base specific avatars. We found that Female avatars tend to have a more positive perception when it comes to gay men. And for mixed reality, gay men also tend to have both negative and positive experiences more intensely than straight men.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mixed reality environments are the next step in remote collaboration, yet there has been little attention towards avatar design for the en-joyability and efficiency of diverse users, particularly for the lesbian, gay, bisexual, transgender and queer (LGBTQ+) community. We believe that it is important to adapt the design of virtual avatars users collaborate with depending on not only the users gender but also their sexuality. In this study, we focus on examining the relationship between the sexual orientation of male users with the gender expression of virtual avatars, while they perform a collaborative task in mixed reality. We analyzed the perception of straight cisgender men and gay cisgender men in relation to the gender expression of the above mentioned agents and find differences that might affect design decisions for creating user-base specific avatars. We found that Female avatars tend to have a more positive perception when it comes to gay men. And for mixed reality, gay men also tend to have both negative and positive experiences more intensely than straight men.",
"fno": "536500a881",
"keywords": [
"Avatars",
"Gender Issues",
"Groupware",
"Human Factors",
"Avatar Design",
"Collaborative Task",
"Design Decisions",
"Diverse Users",
"En Joyability",
"Female Avatars",
"Gay Cisgender Men",
"Gay Men",
"Gender Expression",
"Male Users",
"Mixed Reality Avatars",
"Mixed Reality Environments",
"Remote Collaboration",
"Sexual Orientation",
"Sexuality",
"Straight Cisgender Men",
"Straight Men",
"Transgender",
"User Base Specific Avatars",
"Users Gender",
"Virtual Avatars",
"Avatars",
"Mixed Reality",
"Collaboration",
"Transgender Issues",
"Task Analysis",
"Augmented Reality",
"Mixed Reality",
"Sexual Orientation",
"Gender",
"Collaborative Agents",
"User Centred Design"
],
"authors": [
{
"affiliation": "Keio University Graduate, School of Media Design",
"fullName": "Anish Kundu",
"givenName": "Anish",
"surname": "Kundu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University Graduate, School of Media Design",
"fullName": "Yun Suen Pai",
"givenName": "Yun Suen",
"surname": "Pai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University Graduate, School of Media Design",
"fullName": "Kouta Minamizawa",
"givenName": "Kouta",
"surname": "Minamizawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "881-884",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a875",
"articleId": "1J7W8qbNT56",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a885",
"articleId": "1J7WssjkGSQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2010/6237/0/05444792",
"title": "Mixed reality in virtual world teleconferencing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444792/12OmNwpoFEM",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2019/01/08491276",
"title": "OpenStack Gender Diversity Report",
"doi": null,
"abstractUrl": "/magazine/so/2019/01/08491276/17D45WnnFYb",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a363",
"title": "Mixed Reality Agent-Based Framework for Pedestrian-Cyclist Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a363/1J7WiRFGnDi",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a666",
"title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2019/0869/0/086900a700",
"title": "Investigating the Effects of Gender Bias on GitHub",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2019/086900a700/1cMFvs0gd6o",
"parentPublication": {
"id": "proceedings/icse/2019/0869/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a026",
"title": "The Kuroko Paradigm: The Implications of Augmenting Physical Interaction with AR Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a026/1gysn4uy67C",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a462",
"title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a462/1pysu9tPcGc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a064",
"title": "Pixel Codec Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a064/1yeMmobLlwQ",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrQVmURYMo",
"doi": "10.1109/ISMAR55827.2022.00061",
"title": "What Can I Do There? Controlling AR Self-Avatars to Better Perceive Affordances of the Real World",
"normalizedTitle": "What Can I Do There? Controlling AR Self-Avatars to Better Perceive Affordances of the Real World",
"abstract": "This work explores a new usage of Augmented Reality (AR) to extend perception and interaction within physical areas ahead of ourselves. To do so, we propose to detach ourselves from our physical position by creating a controllable “digital copy”; of our body that can be used to navigate in local space from a third-person perspective. With such a viewpoint, we aim to improve our mental representation of distant space and understanding of action possibilities (called affordances), without requiring us to physically enter this space. Our approach relies on AR to virtually integrate the user’s body in remote areas in the form of an avatar. We discuss concrete application scenarios and propose several techniques to manipulate avatars in the third person as a part of a larger conceptual framework. Finally, through a user study employing one of the proposed techniques (puppeteering), we evaluate the validity of using third-person embodiment to extend our perception of the real world to areas outside of our proximal zone. We found that this approach succeeded in enhancing the user’s accuracy and confidence when estimating their action capabilities at distant locations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work explores a new usage of Augmented Reality (AR) to extend perception and interaction within physical areas ahead of ourselves. To do so, we propose to detach ourselves from our physical position by creating a controllable “digital copy”; of our body that can be used to navigate in local space from a third-person perspective. With such a viewpoint, we aim to improve our mental representation of distant space and understanding of action possibilities (called affordances), without requiring us to physically enter this space. Our approach relies on AR to virtually integrate the user’s body in remote areas in the form of an avatar. We discuss concrete application scenarios and propose several techniques to manipulate avatars in the third person as a part of a larger conceptual framework. Finally, through a user study employing one of the proposed techniques (puppeteering), we evaluate the validity of using third-person embodiment to extend our perception of the real world to areas outside of our proximal zone. We found that this approach succeeded in enhancing the user’s accuracy and confidence when estimating their action capabilities at distant locations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work explores a new usage of Augmented Reality (AR) to extend perception and interaction within physical areas ahead of ourselves. To do so, we propose to detach ourselves from our physical position by creating a controllable “digital copy”; of our body that can be used to navigate in local space from a third-person perspective. With such a viewpoint, we aim to improve our mental representation of distant space and understanding of action possibilities (called affordances), without requiring us to physically enter this space. Our approach relies on AR to virtually integrate the user’s body in remote areas in the form of an avatar. We discuss concrete application scenarios and propose several techniques to manipulate avatars in the third person as a part of a larger conceptual framework. Finally, through a user study employing one of the proposed techniques (puppeteering), we evaluate the validity of using third-person embodiment to extend our perception of the real world to areas outside of our proximal zone. We found that this approach succeeded in enhancing the user’s accuracy and confidence when estimating their action capabilities at distant locations.",
"fno": "532500a450",
"keywords": [
"Augmented Reality",
"Avatars",
"Man Machine Systems",
"Mobile Robots",
"User Interfaces",
"Virtual Reality",
"Action Capabilities",
"Action Possibilities",
"Augmented Reality",
"Avatar",
"Better Perceive Affordances",
"Concrete Application Scenarios",
"Controllable Digital Copy",
"Distant Locations",
"Distant Space",
"Larger Conceptual Framework",
"Local Space",
"Mental Representation",
"Physical Areas",
"Physical Position",
"Remote Areas",
"Self Avatars",
"Techniques",
"Third Person Embodiment",
"Third Person Perspective",
"Navigation",
"Avatars",
"Affordances",
"Decision Making",
"Aerospace Electronics",
"Augmented Reality",
"Guidelines",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Inria, Univ. Bordeaux, LaBRI, CNRS",
"fullName": "Adélaïde Genay",
"givenName": "Adélaïde",
"surname": "Genay",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria, Univ. Rennes, IRISA, CNRS",
"fullName": "Anatole Lécuyer",
"givenName": "Anatole",
"surname": "Lécuyer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria, Univ. Bordeaux, LaBRI, CNRS",
"fullName": "Martin Hachet",
"givenName": "Martin",
"surname": "Hachet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "450-459",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1JrQVgVAEE0",
"name": "pismar202253250-09995452s1-mm_532500a450.zip",
"size": "35.9 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202253250-09995452s1-mm_532500a450.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "532500a441",
"articleId": "1JrRbIVIzPG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a460",
"articleId": "1JrR6BnYp6U",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-amh/2009/5508/0/05336723",
"title": "EYEPLY: Baseball proof of concept — Mobile augmentation for entertainment and shopping venues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2009/05336723/12OmNqJq4k0",
"parentPublication": {
"id": "proceedings/ismar-amh/2009/5508/0",
"title": "2009 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a194",
"title": "[POSTER] Avatar-Mediated Contact Interaction between Remote Users for Social Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a194/12OmNvTTcga",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549341",
"title": "Keynote speaker: Infinite reality: Avatars, eternal life, new worlds, and the dawn of the virtual revolution",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549341/12OmNvzJGas",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a308",
"title": "Empirically Evaluating the Effects of Eye Height and Self-Avatars on Dynamic Passability Affordances in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a308/1MNgWLowz1m",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a096",
"title": "Stepping over Obstacles with Augmented Reality based on Visual Exproprioception",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a096/1pBMiFPYlkA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a462",
"title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a462/1pysu9tPcGc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIx9zwn7SE",
"doi": "10.1109/VR46266.2020.00073",
"title": "Comparative Evaluation of Viewing and Self-Representation on Passability Affordances to a Realistic Sliding Doorway in Real and Immersive Virtual Environments",
"normalizedTitle": "Comparative Evaluation of Viewing and Self-Representation on Passability Affordances to a Realistic Sliding Doorway in Real and Immersive Virtual Environments",
"abstract": "As Virtual Reality (VR) devices become more accessible, a multitude of VR applications engage users in highly immersive virtual environments that feature realistic graphics, real-life scenarios, and self-avatars. Many of these simulations require users to make spontaneous affordance judgments such as stepping over obstacles, passing through gaps, etc. which are shown to be affected by the nature of our self-representation in the virtual world. As the technology for creating self-avatars becomes more widely available, it is important to explore how various affordance judgments are affected by the presence of self-avatars. In this work, we investigate the effects of body-scaled self-avatars on the affordance of passability in a natural setting. We implemented a gender-matched body-scaled self-avatar using HTC Vive trackers and evaluated how passability judgments for a sliding doorway in VR, with and without an avatar, compared to the real world judgments. The results suggest that passability judgments are more conservative in VR as compared to the real world. However, the presence of a self-avatar does not significantly affect passability judgments made in VR. This does not align with previous findings which show that having a self-avatar improves judgments and estimates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As Virtual Reality (VR) devices become more accessible, a multitude of VR applications engage users in highly immersive virtual environments that feature realistic graphics, real-life scenarios, and self-avatars. Many of these simulations require users to make spontaneous affordance judgments such as stepping over obstacles, passing through gaps, etc. which are shown to be affected by the nature of our self-representation in the virtual world. As the technology for creating self-avatars becomes more widely available, it is important to explore how various affordance judgments are affected by the presence of self-avatars. In this work, we investigate the effects of body-scaled self-avatars on the affordance of passability in a natural setting. We implemented a gender-matched body-scaled self-avatar using HTC Vive trackers and evaluated how passability judgments for a sliding doorway in VR, with and without an avatar, compared to the real world judgments. The results suggest that passability judgments are more conservative in VR as compared to the real world. However, the presence of a self-avatar does not significantly affect passability judgments made in VR. This does not align with previous findings which show that having a self-avatar improves judgments and estimates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As Virtual Reality (VR) devices become more accessible, a multitude of VR applications engage users in highly immersive virtual environments that feature realistic graphics, real-life scenarios, and self-avatars. Many of these simulations require users to make spontaneous affordance judgments such as stepping over obstacles, passing through gaps, etc. which are shown to be affected by the nature of our self-representation in the virtual world. As the technology for creating self-avatars becomes more widely available, it is important to explore how various affordance judgments are affected by the presence of self-avatars. In this work, we investigate the effects of body-scaled self-avatars on the affordance of passability in a natural setting. We implemented a gender-matched body-scaled self-avatar using HTC Vive trackers and evaluated how passability judgments for a sliding doorway in VR, with and without an avatar, compared to the real world judgments. The results suggest that passability judgments are more conservative in VR as compared to the real world. However, the presence of a self-avatar does not significantly affect passability judgments made in VR. This does not align with previous findings which show that having a self-avatar improves judgments and estimates.",
"fno": "09089645",
"keywords": [
"Affordances",
"Virtual Environments",
"Avatars",
"Virtual Reality",
"Real Time Systems",
"Interactive Systems",
"Self Avatars",
"Affordance",
"Passability",
"Virtual Reality",
"Human Centered Computing",
"Empirical Studies In HCI",
"Human Centered Computing",
"Interaction Design"
],
"authors": [
{
"affiliation": "Key Lime Interactive",
"fullName": "Ayush Bhargava",
"givenName": "Ayush",
"surname": "Bhargava",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University,Department of Psychology",
"fullName": "Hannah Solini",
"givenName": "Hannah",
"surname": "Solini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University,Department of Psychology",
"fullName": "Kathryn Lucaites",
"givenName": "Kathryn",
"surname": "Lucaites",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University Center for Workforce Development",
"fullName": "Jeffrey W. Bertrand",
"givenName": "Jeffrey W.",
"surname": "Bertrand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University,School of Computing",
"fullName": "Andrew Robb",
"givenName": "Andrew",
"surname": "Robb",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University,Department of Psychology",
"fullName": "Christopher C. Pagano",
"givenName": "Christopher C.",
"surname": "Pagano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University,School of Computing",
"fullName": "Sabarish V. Babu",
"givenName": "Sabarish V.",
"surname": "Babu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "519-528",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089442",
"articleId": "1jIxe7ldiE0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089596",
"articleId": "1jIx7ELvYVa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446229",
"title": "Any “Body” There? Avatar Visibility Effects in a Virtual Reality Game",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446229/13bd1fHrlRx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446189",
"title": "Towards Revisiting Passability Judgments in Real and Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446189/13bd1fdV4lC",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404654",
"title": "Toward \"Pseudo-Haptic Avatars\": Modifying the Visual Animation of Self-Avatar Can Simulate the Perception of Weight Lifting",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404654/13rRUyft7D4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714123",
"title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a001",
"title": "A Cardboard-Based Virtual Reality Study on Self-Avatar Appearance and Breathing",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a001/1CJdXjsLKBG",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049626",
"title": "Can I Squeeze Through? Effects of Self-Avatars and Calibration in a Person-Plus-Virtual-Object System on Perceived Lateral Passability in VR",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049626/1KYoySw7RM4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a308",
"title": "Empirically Evaluating the Effects of Eye Height and Self-Avatars on Dynamic Passability Affordances in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a308/1MNgWLowz1m",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090630",
"title": "Embodied Realistic Avatar System with Body Motions and Facial Expressions for Communication in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090630/1jIxtbZL30Y",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09440766",
"title": "Did I Hit the Door? Effects of Self-Avatars and Calibration in a Person-Plus-Virtual-Object System on Perceived Frontal Passability in VR",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09440766/1tTpcuKN5jW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2021/2463/0/246300b036",
"title": "How Do Avatar Appearances Affect Communication from Others?",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2021/246300b036/1wLcE9cNine",
"parentPublication": {
"id": "proceedings/compsac/2021/2463/0",
"title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAsCE62Tm",
"doi": "10.1109/VR50410.2021.00013",
"title": "Self-Avatars in Immersive Technology",
"normalizedTitle": "Self-Avatars in Immersive Technology",
"abstract": "Summary form only given, as follows. The complete presentation was not made available for publication as part of the conference proceedings. Betty Mohler joined Amazon in 2018 as a Principal Research Scientist at the Amazon Research Development Center. However, the research of this presentation was conducted prior to Betty Mohler's position at Amazon and therefore does not reflect her research or future work at Amazon. The research she will be discussing considers the physical, experienced and visual body of the user. In her talk at the IEEE VR Conference, Mohler will discuss the benefits and challenges of adding self-avatars to immersive technology, based on her research reports about how self-avatars are central to enabling personalized and immersive experiences and also how the human body should be carefully considered when designing immersive technology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given, as follows. The complete presentation was not made available for publication as part of the conference proceedings. Betty Mohler joined Amazon in 2018 as a Principal Research Scientist at the Amazon Research Development Center. However, the research of this presentation was conducted prior to Betty Mohler's position at Amazon and therefore does not reflect her research or future work at Amazon. The research she will be discussing considers the physical, experienced and visual body of the user. In her talk at the IEEE VR Conference, Mohler will discuss the benefits and challenges of adding self-avatars to immersive technology, based on her research reports about how self-avatars are central to enabling personalized and immersive experiences and also how the human body should be carefully considered when designing immersive technology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given, as follows. The complete presentation was not made available for publication as part of the conference proceedings. Betty Mohler joined Amazon in 2018 as a Principal Research Scientist at the Amazon Research Development Center. However, the research of this presentation was conducted prior to Betty Mohler's position at Amazon and therefore does not reflect her research or future work at Amazon. The research she will be discussing considers the physical, experienced and visual body of the user. In her talk at the IEEE VR Conference, Mohler will discuss the benefits and challenges of adding self-avatars to immersive technology, based on her research reports about how self-avatars are central to enabling personalized and immersive experiences and also how the human body should be carefully considered when designing immersive technology.",
"fno": "255600z023",
"keywords": [
"Virtual Reality",
"Visualization",
"User Interfaces",
"Three Dimensional Displays",
"Philosophical Considerations",
"Conferences",
"Computer Vision"
],
"authors": [],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "xxiii-xxiii",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600z020",
"articleId": "1tuAyYF2bdK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600z024",
"articleId": "1tuBi2ybUaI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2016/9041/0/9041a543",
"title": "Categorization of Embodied User Interface in Immersive Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2016/9041a543/12OmNC1Gu9J",
"parentPublication": {
"id": "proceedings/icalt/2016/9041/0",
"title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446539",
"title": "Investigating the Effects of Anthropomorphic Fidelity of Self-Avatars on Near Field Depth Perception in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446539/13bd1h03qOe",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2006/03/u3006",
"title": "Haptics in Virtual Reality and Multimedia",
"doi": null,
"abstractUrl": "/magazine/mu/2006/03/u3006/13rRUxASumN",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a936",
"title": "[DC] Immersive Analytics for Understanding Ecosystem Services Tradeoffs",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a936/1CJcFsf3SU0",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a650",
"title": "Emotional Avatars: Effect of Uncanniness in Identifying Emotions using Avatar Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a650/1CJdQj37aw0",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798207",
"title": "[DC] Self-Adaptive Technologies for Immersive Trainings",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798207/1cJ10bYBC2Q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090454",
"title": "The other way: immersive VR storytelling through biking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090454/1jIxszQHffq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a633",
"title": "Immersive Authoring of Virtual Reality Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a633/1tnXNG6t1x6",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a589",
"title": "Simulation and Assessment of Safety Procedure in an Immersive Virtual Reality (IVR) Laboratory",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a589/1tnXRaYRcdi",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a438",
"title": "Impact of Avatar Anthropomorphism and Task Type on Social Presence in Immersive Collaborative Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a438/1tnXuRl9EJi",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1zxLs2qb1yU",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1zxLAQ4T0ac",
"doi": "10.1109/AIVR52153.2021.00068",
"title": "The Ethics of Rehabilitation in Virtual Reality: the role of Self-Avatars and Deep Learning",
"normalizedTitle": "The Ethics of Rehabilitation in Virtual Reality: the role of Self-Avatars and Deep Learning",
"abstract": "Medical Rehabilitation systems are constantly improving according to the technological evolution, and the use of virtual reality for assistive purposes is being investigated in research. One particular case of rehabilitation regards the hands and upper limbs of hemiplegic post-stroke patients. While studying possible ways to support this population, we discuss the ethical issues that arise from the use of extended reality technologies, in particular self-avatars, and the use of artificial intelligence, in particular deep learning, in neuro-cognitive applications. We present a rehabilitation approach, based on the digital embodiment of virtual limbs, in which the movements of the self-avatars are modified and optimized by the system, to lead the patients to the performance of the natural actions they lost. The ethical discussion starts from the policy of the representation of the self in virtual environments, and additional issues arise when the users have disabilities. A learning system, based on a convolutional neural network, allows the personalization of the parameters of the therapy in the long term. The collection and analysis of physiological data is also discussed, again in a scenario that involves vulnerable users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Medical Rehabilitation systems are constantly improving according to the technological evolution, and the use of virtual reality for assistive purposes is being investigated in research. One particular case of rehabilitation regards the hands and upper limbs of hemiplegic post-stroke patients. While studying possible ways to support this population, we discuss the ethical issues that arise from the use of extended reality technologies, in particular self-avatars, and the use of artificial intelligence, in particular deep learning, in neuro-cognitive applications. We present a rehabilitation approach, based on the digital embodiment of virtual limbs, in which the movements of the self-avatars are modified and optimized by the system, to lead the patients to the performance of the natural actions they lost. The ethical discussion starts from the policy of the representation of the self in virtual environments, and additional issues arise when the users have disabilities. A learning system, based on a convolutional neural network, allows the personalization of the parameters of the therapy in the long term. The collection and analysis of physiological data is also discussed, again in a scenario that involves vulnerable users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Medical Rehabilitation systems are constantly improving according to the technological evolution, and the use of virtual reality for assistive purposes is being investigated in research. One particular case of rehabilitation regards the hands and upper limbs of hemiplegic post-stroke patients. While studying possible ways to support this population, we discuss the ethical issues that arise from the use of extended reality technologies, in particular self-avatars, and the use of artificial intelligence, in particular deep learning, in neuro-cognitive applications. We present a rehabilitation approach, based on the digital embodiment of virtual limbs, in which the movements of the self-avatars are modified and optimized by the system, to lead the patients to the performance of the natural actions they lost. The ethical discussion starts from the policy of the representation of the self in virtual environments, and additional issues arise when the users have disabilities. A learning system, based on a convolutional neural network, allows the personalization of the parameters of the therapy in the long term. The collection and analysis of physiological data is also discussed, again in a scenario that involves vulnerable users.",
"fno": "322500a324",
"keywords": [
"Avatars",
"Cognition",
"Deep Learning Artificial Intelligence",
"Ethical Aspects",
"Neural Nets",
"Patient Rehabilitation",
"Virtual Reality",
"Technological Evolution",
"Assistive Purposes",
"Upper Limbs",
"Hemiplegic Post Stroke Patients",
"Ethical Issues",
"Extended Reality Technologies",
"Self Avatars",
"Artificial Intelligence",
"Deep Learning",
"Neuro Cognitive Applications",
"Rehabilitation Approach",
"Virtual Limbs",
"Ethical Discussion",
"Virtual Environments",
"Additional Issues",
"Learning System",
"Rehabilitation Ethics",
"Medical Rehabilitation Systems",
"Deep Learning",
"Learning Systems",
"Ethics",
"Extended Reality",
"Sociology",
"Virtual Environments",
"Medical Treatment",
"Virtual Reality",
"Rehabilitation",
"Ethics",
"Self Avatars",
"Deep Learning"
],
"authors": [
{
"affiliation": "University of Torino,Department of Computer Science,Torino,Italy",
"fullName": "Agata Marta Soccini",
"givenName": "Agata Marta",
"surname": "Soccini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Torino,Department of Computer Science,Torino,Italy",
"fullName": "Federica Cena",
"givenName": "Federica",
"surname": "Cena",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "324-328",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3225-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "322500a319",
"articleId": "1zxLsKlsBtS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "322500a329",
"articleId": "1zxLB5H6KQg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmeae/2013/2253/0/2253a164",
"title": "CPM Ankle Rehabilitation Machine with EMG Signal Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icmeae/2013/2253a164/12OmNCbCrJp",
"parentPublication": {
"id": "proceedings/icmeae/2013/2253/0",
"title": "International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926560",
"title": "Gamification of Hand Rehabilitation Process Using Virtual Reality Tools: Using Leap Motion for Hand Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926560/12OmNqGiu27",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2013/0965/0/06624247",
"title": "Use of gaming sensors and customised exergames for parkinson's disease rehabilitation: A proposed virtual reality framework",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2013/06624247/12OmNx7G69o",
"parentPublication": {
"id": "proceedings/vs-games/2013/0965/0",
"title": "2013 5th International Conference on Games and Virtual Worlds for Serious Applications (VS-GAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2006/04/x4056",
"title": "An Approach to Computing Ethics",
"doi": null,
"abstractUrl": "/magazine/ex/2006/04/x4056/13rRUyuNsBp",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/i-span/2018/8534/0/853400a253",
"title": "An Upper Extremity Rehabilitation System Using Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/i-span/2018/853400a253/17D45WWzW5h",
"parentPublication": {
"id": "proceedings/i-span/2018/8534/0",
"title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euros&pw/2022/9560/0/956000a538",
"title": "Ethics in Security Research: Visions, Reality, and Paths Forward",
"doi": null,
"abstractUrl": "/proceedings-article/euros&pw/2022/956000a538/1Eygz9ETYM8",
"parentPublication": {
"id": "proceedings/euros&pw/2022/9560/0",
"title": "2022 IEEE European Symposium on Security and Privacy Workshops (EuroS&PW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2019/4617/0/461700a617",
"title": "Meta-Learning for Avatar Kinematics Reconstruction in Virtual Reality Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2019/461700a617/1grPfjAXxOo",
"parentPublication": {
"id": "proceedings/bibe/2019/4617/0",
"title": "2019 IEEE 19th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a371",
"title": "Virtual Environments for Therapeutic Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a371/1oZBCC6FE8o",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a217",
"title": "Lower Limb Balance Rehabilitation of Post-stroke Patients Using an Evaluating and Training Combined Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a217/1pBMhnkqb04",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icssa/2019/5912/0/591200a070",
"title": "Acceptance of Virtual Health Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/icssa/2019/591200a070/1q0FToF6GrK",
"parentPublication": {
"id": "proceedings/icssa/2019/5912/0",
"title": "2019 International Conference on Software Security and Assurance (ICSSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvnwVj4",
"doi": "10.1109/VR.2017.7892278",
"title": "Bodiless embodiment: A descriptive survey of avatar bodily coherence in first-wave consumer VR applications",
"normalizedTitle": "Bodiless embodiment: A descriptive survey of avatar bodily coherence in first-wave consumer VR applications",
"abstract": "This preliminary study surveys whether/which avatar body parts are visible in first-wave consumer virtual reality (VR) applications for the HTC Vive (n = 200). A simple coding schema for assessing avatar bodily coherence (ABC) is piloted and evaluated. Results provide a snapshot of ABC in popular high-end VR applications in Q3 2016. It is reported (Table 1) that 86.5% of sampled items feature fully invisible avatars, 9% depict hands only, and 4.5% feature a head, torso, or legs, but with some degree of bodily incoherence. Findings suggest that users may experience a sense of ownership and/or agency over their virtual actions even in the absence of visible avatar body parts. This informs research questions and hypotheses for future experimental enquiry into how bodily representation interplays with user cognition, perceived virtual embodiment (body ownership illusion and sense of agency), and spatial telepresence (hereafter spatial presence). For instance: To what extent/under what conditions do the users of consumer VR systems demonstrate a sense of bodily vulnerability (a drive for bodily preservation) when no virtual body is present/visible?",
"abstracts": [
{
"abstractType": "Regular",
"content": "This preliminary study surveys whether/which avatar body parts are visible in first-wave consumer virtual reality (VR) applications for the HTC Vive (n = 200). A simple coding schema for assessing avatar bodily coherence (ABC) is piloted and evaluated. Results provide a snapshot of ABC in popular high-end VR applications in Q3 2016. It is reported (Table 1) that 86.5% of sampled items feature fully invisible avatars, 9% depict hands only, and 4.5% feature a head, torso, or legs, but with some degree of bodily incoherence. Findings suggest that users may experience a sense of ownership and/or agency over their virtual actions even in the absence of visible avatar body parts. This informs research questions and hypotheses for future experimental enquiry into how bodily representation interplays with user cognition, perceived virtual embodiment (body ownership illusion and sense of agency), and spatial telepresence (hereafter spatial presence). For instance: To what extent/under what conditions do the users of consumer VR systems demonstrate a sense of bodily vulnerability (a drive for bodily preservation) when no virtual body is present/visible?",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This preliminary study surveys whether/which avatar body parts are visible in first-wave consumer virtual reality (VR) applications for the HTC Vive (n = 200). A simple coding schema for assessing avatar bodily coherence (ABC) is piloted and evaluated. Results provide a snapshot of ABC in popular high-end VR applications in Q3 2016. It is reported (Table 1) that 86.5% of sampled items feature fully invisible avatars, 9% depict hands only, and 4.5% feature a head, torso, or legs, but with some degree of bodily incoherence. Findings suggest that users may experience a sense of ownership and/or agency over their virtual actions even in the absence of visible avatar body parts. This informs research questions and hypotheses for future experimental enquiry into how bodily representation interplays with user cognition, perceived virtual embodiment (body ownership illusion and sense of agency), and spatial telepresence (hereafter spatial presence). For instance: To what extent/under what conditions do the users of consumer VR systems demonstrate a sense of bodily vulnerability (a drive for bodily preservation) when no virtual body is present/visible?",
"fno": "07892278",
"keywords": [
"Avatars",
"Coherence",
"Virtual Environments",
"Media",
"Biological System Modeling",
"Encoding",
"Agency",
"Avatar",
"Embodiment",
"Spatial Presence",
"VR"
],
"authors": [
{
"affiliation": "University of Copenhagen, Denmark",
"fullName": "Dooley Murphy",
"givenName": "Dooley",
"surname": "Murphy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "265-266",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892277",
"articleId": "12OmNz3bdR0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892279",
"articleId": "12OmNBEGYJE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223377",
"title": "Avatar embodiment realism and virtual fitness training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892275",
"title": "Socially immersive avatar-based communication",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892275/12OmNwEJ0VR",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446229",
"title": "Any “Body” There? Avatar Visibility Effects in a Virtual Reality Game",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446229/13bd1fHrlRx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a260",
"title": "The Effects of Avatar and Environment Design on Embodiment, Presence, Activation, and Task Load in a Virtual Reality Exercise Application",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a260/1JrRf0Dbcac",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049676",
"title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090630",
"title": "Embodied Realistic Avatar System with Body Motions and Facial Expressions for Communication in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090630/1jIxtbZL30Y",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a054",
"title": "The Effects of Body Tracking Fidelity on Embodiment of an Inverse-Kinematic Avatar for Male Participants",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a054/1pyswgi4b7y",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a127",
"title": "Evidence for a Relationship Between Self-Avatar Fixations and Perceived Avatar Similarity within Low-Cost Virtual Reality Embodiment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a127/1tnXDDh8sqk",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgRmjl6Zq",
"doi": "10.1109/VR55154.2023.00024",
"title": "I'm Transforming! Effects of Visual Transitions to Change of Avatar on the Sense of Embodiment in AR",
"normalizedTitle": "I'm Transforming! Effects of Visual Transitions to Change of Avatar on the Sense of Embodiment in AR",
"abstract": "Virtual avatars are more and more often featured in Virtual Reality (VR) and Augmented Reality (AR) applications. When embodying a virtual avatar, one may desire to change of appearance over the course of the embodiment. However, switching suddenly from one appearance to another can break the continuity of the user experience and potentially impact the sense of embodiment (SoE), especially when the new appearance is very different. In this paper, we explore how applying smooth visual transitions at the moment of the change can help to maintain the SoE and benefit the general user experience. To address this, we implemented an AR system allowing users to embody a regular-shaped avatar that can be transformed into a muscular one through a visual effect. The avatar's transformation can be triggered either by the user through physical action (“active” transition), or automatically launched by the system (“passive” transition). We conducted a user study to evaluate the effects of these two types of transformations on the SoE by comparing them to control conditions where there was no visual feedback of the transformation. Our results show that changing the appearance of one's avatar with an active transition (with visual feedback), compared to a passive transition, helps to maintain the user's sense of agency, a component of the SoE. They also partially suggest that the Proteus effects experienced during the embodiment were enhanced by these transitions. Therefore, we conclude that visual effects controlled by the user when changing their avatar's appearance can benefit their experience by preserving the SoE and intensifying the Proteus effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual avatars are more and more often featured in Virtual Reality (VR) and Augmented Reality (AR) applications. When embodying a virtual avatar, one may desire to change of appearance over the course of the embodiment. However, switching suddenly from one appearance to another can break the continuity of the user experience and potentially impact the sense of embodiment (SoE), especially when the new appearance is very different. In this paper, we explore how applying smooth visual transitions at the moment of the change can help to maintain the SoE and benefit the general user experience. To address this, we implemented an AR system allowing users to embody a regular-shaped avatar that can be transformed into a muscular one through a visual effect. The avatar's transformation can be triggered either by the user through physical action (“active” transition), or automatically launched by the system (“passive” transition). We conducted a user study to evaluate the effects of these two types of transformations on the SoE by comparing them to control conditions where there was no visual feedback of the transformation. Our results show that changing the appearance of one's avatar with an active transition (with visual feedback), compared to a passive transition, helps to maintain the user's sense of agency, a component of the SoE. They also partially suggest that the Proteus effects experienced during the embodiment were enhanced by these transitions. Therefore, we conclude that visual effects controlled by the user when changing their avatar's appearance can benefit their experience by preserving the SoE and intensifying the Proteus effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual avatars are more and more often featured in Virtual Reality (VR) and Augmented Reality (AR) applications. When embodying a virtual avatar, one may desire to change of appearance over the course of the embodiment. However, switching suddenly from one appearance to another can break the continuity of the user experience and potentially impact the sense of embodiment (SoE), especially when the new appearance is very different. In this paper, we explore how applying smooth visual transitions at the moment of the change can help to maintain the SoE and benefit the general user experience. To address this, we implemented an AR system allowing users to embody a regular-shaped avatar that can be transformed into a muscular one through a visual effect. The avatar's transformation can be triggered either by the user through physical action (“active” transition), or automatically launched by the system (“passive” transition). We conducted a user study to evaluate the effects of these two types of transformations on the SoE by comparing them to control conditions where there was no visual feedback of the transformation. Our results show that changing the appearance of one's avatar with an active transition (with visual feedback), compared to a passive transition, helps to maintain the user's sense of agency, a component of the SoE. They also partially suggest that the Proteus effects experienced during the embodiment were enhanced by these transitions. Therefore, we conclude that visual effects controlled by the user when changing their avatar's appearance can benefit their experience by preserving the SoE and intensifying the Proteus effects.",
"fno": "481500a083",
"keywords": [
"Human Computer Interaction",
"Visualization",
"Three Dimensional Displays",
"Avatars",
"Switches",
"Visual Effects",
"User Experience",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality",
"Human Centered Computing Human Computer Interaction HCI Empirical Studies In HCI"
],
"authors": [
{
"affiliation": "NAIST,Japan",
"fullName": "Riku Otono",
"givenName": "Riku",
"surname": "Otono",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria,Bordeaux,France",
"fullName": "Adélaïde Genay",
"givenName": "Adélaïde",
"surname": "Genay",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NAIST,Japan",
"fullName": "Monica Perusquía-Hernández",
"givenName": "Monica",
"surname": "Perusquía-Hernández",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NAIST,Japan",
"fullName": "Naoya Isoyama",
"givenName": "Naoya",
"surname": "Isoyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NAIST,Japan",
"fullName": "Hideaki Uchiyama",
"givenName": "Hideaki",
"surname": "Uchiyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria,Bordeaux,France",
"fullName": "Martin Hachet",
"givenName": "Martin",
"surname": "Hachet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria,Bordeaux,France",
"fullName": "Anatole Lécuyer",
"givenName": "Anatole",
"surname": "Lécuyer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NAIST,Japan",
"fullName": "Kiyoshi Kiyokawa",
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "83-93",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "481500a072",
"articleId": "1MNgmRWwNUI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a094",
"articleId": "1MNgWtYsR5S",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040591",
"title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714123",
"title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a503",
"title": "Studying “Avatar Transitions” in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a141",
"title": "Petting a cat helps you incarnate the avatar: Influence of the emotions over embodiment in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a141/1JrRepqALbW",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798263",
"title": "EEG Can Be Used to Measure Embodiment When Controlling a Walking Self-Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798263/1cJ1gj5NtQc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998305",
"title": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998305/1hpPBuW1ahy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a054",
"title": "The Effects of Body Tracking Fidelity on Embodiment of an Inverse-Kinematic Avatar for Male Participants",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a054/1pyswgi4b7y",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAS4s4",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"acronym": "robot",
"groupId": "1000639",
"volume": "0",
"displayVolume": "0",
"year": "1992",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAgY7lY",
"doi": "10.1109/ROBOT.1992.220015",
"title": "An experimental study of hierarchical control laws for grasping and manipulation using a two-fingered planar hand",
"normalizedTitle": "An experimental study of hierarchical control laws for grasping and manipulation using a two-fingered planar hand",
"abstract": "Compares the performance of hierarchical and single-level controllers in a grasping context, and concludes that for rapid, planar grasping motions of heavy objects the performance of a hierarchical control structure is superior to that of the two single-level controllers tested. Although the theory discussed applies to grasping problems of arbitrary complexity, the focus is on planar, two-fingered grasping for the sake of clarity and to simplify implementation and experimental testing of the proposed control algorithms. The control algorithms have been implemented on a multifingered hand.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Compares the performance of hierarchical and single-level controllers in a grasping context, and concludes that for rapid, planar grasping motions of heavy objects the performance of a hierarchical control structure is superior to that of the two single-level controllers tested. Although the theory discussed applies to grasping problems of arbitrary complexity, the focus is on planar, two-fingered grasping for the sake of clarity and to simplify implementation and experimental testing of the proposed control algorithms. The control algorithms have been implemented on a multifingered hand.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Compares the performance of hierarchical and single-level controllers in a grasping context, and concludes that for rapid, planar grasping motions of heavy objects the performance of a hierarchical control structure is superior to that of the two single-level controllers tested. Although the theory discussed applies to grasping problems of arbitrary complexity, the focus is on planar, two-fingered grasping for the sake of clarity and to simplify implementation and experimental testing of the proposed control algorithms. The control algorithms have been implemented on a multifingered hand.",
"fno": "00220015",
"keywords": [
"Hierarchical Systems",
"Manipulators",
"Multivariable Control Systems",
"Hierarchical Control Laws",
"Grasping",
"Manipulation",
"Two Fingered Planar Hand",
"Single Level Controllers",
"Control Algorithms",
"Multifingered Hand",
"Grasping",
"Motion Control",
"PD Control",
"Jacobian Matrices",
"Robot Control",
"Force Control",
"Equations",
"Fingers",
"Transmission Line Matrix Methods",
"Proportional Control"
],
"authors": [
{
"affiliation": "Dept. of EECS, California Univ., Berkeley, CA, USA",
"fullName": "K. Hollerbach",
"givenName": "K.",
"surname": "Hollerbach",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "R.M. Murray",
"givenName": "R.M.",
"surname": "Murray",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "S.S. Sastry",
"givenName": "S.S.",
"surname": "Sastry",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "robot",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1992-01-01T00:00:00",
"pubType": "proceedings",
"pages": "2770,2771,2772,2773,2774,2775",
"year": "1992",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00220014",
"articleId": "12OmNqyDjsV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00220016",
"articleId": "12OmNqIhG70",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1988/0852/0/00012053",
"title": "Inverse kinematics for a multifingered hand",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012053/12OmNAlvI27",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1989/1938/0/00100100",
"title": "Finger force computation for manipulation of an object by a multifingered robot hand",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100100/12OmNAoDhTb",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012055",
"title": "Evaluation and determination of grasping forces for multifingered hands",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012055/12OmNqzu6Kx",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1993/3870/0/00378142",
"title": "Grasping visual symmetry",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378142/12OmNrJAdQW",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1989/1938/0/00100054",
"title": "Control experiments in planar manipulation and grasping",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100054/12OmNvA1hew",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isatp/2003/7770/0/01217206",
"title": "Planning four grasping points from images of planar objects",
"doi": null,
"abstractUrl": "/proceedings-article/isatp/2003/01217206/12OmNx2QUMo",
"parentPublication": {
"id": "proceedings/isatp/2003/7770/0",
"title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00219919",
"title": "Grasp planning for multifingered robot hands",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00219919/12OmNxWuitB",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012078",
"title": "On grasping and coordinated manipulation by a multifingered robot hand",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012078/12OmNyywxC5",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446538",
"title": "A Realtime Virtual Grasping System for Manipulating Complex Objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446538/13bd1gzWkQa",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2021/0232/0/023200a172",
"title": "A Novel Multi-Fingered Hand for Robotic Grasp",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2021/023200a172/1yLPknJqxk4",
"parentPublication": {
"id": "proceedings/icnisc/2021/0232/0",
"title": "2021 7th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1fHrlRF",
"doi": "10.1109/VR.2018.8446399",
"title": "Three Haptic Shape-Feedback Controllers for Virtual Reality",
"normalizedTitle": "Three Haptic Shape-Feedback Controllers for Virtual Reality",
"abstract": "We present three new novel haptic controllers that render shape force feedback during interaction. 1) CLAW is a multi-purpose controller that renders tactile forces for common hand interactions, such as grasping, touching, and triggering grasped objects. 2) Haptic Revolver is a general-purpose handheld VR controller that renders touch contact with virtual surfaces, motion shear along a surface, textures, and shapes using interchangeable wheels. 3) Haptic Links haptic render shape feedback between two controllers using variable-stiffness locking mechanisms to provide force feedback for grasping and interacting with two-handed objects such as wind instruments, steering wheels, handle bars, or bow and arrow.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present three new novel haptic controllers that render shape force feedback during interaction. 1) CLAW is a multi-purpose controller that renders tactile forces for common hand interactions, such as grasping, touching, and triggering grasped objects. 2) Haptic Revolver is a general-purpose handheld VR controller that renders touch contact with virtual surfaces, motion shear along a surface, textures, and shapes using interchangeable wheels. 3) Haptic Links haptic render shape feedback between two controllers using variable-stiffness locking mechanisms to provide force feedback for grasping and interacting with two-handed objects such as wind instruments, steering wheels, handle bars, or bow and arrow.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present three new novel haptic controllers that render shape force feedback during interaction. 1) CLAW is a multi-purpose controller that renders tactile forces for common hand interactions, such as grasping, touching, and triggering grasped objects. 2) Haptic Revolver is a general-purpose handheld VR controller that renders touch contact with virtual surfaces, motion shear along a surface, textures, and shapes using interchangeable wheels. 3) Haptic Links haptic render shape feedback between two controllers using variable-stiffness locking mechanisms to provide force feedback for grasping and interacting with two-handed objects such as wind instruments, steering wheels, handle bars, or bow and arrow.",
"fno": "08446399",
"keywords": [
"Force Feedback",
"Haptic Interfaces",
"Rendering Computer Graphics",
"Virtual Reality",
"Multipurpose Controller",
"Common Hand Interactions",
"Grasped Objects",
"General Purpose Handheld VR Controller",
"Virtual Surfaces",
"Shape Feedback",
"Two Handed Objects",
"Tactile Force",
"Haptic Shape Feedback Controllers",
"Shape Force Feedback",
"Haptic Revolver",
"CLAW Controller",
"Rendering",
"Conferences",
"Virtual Reality",
"Three Dimensional Displays",
"User Interfaces",
"Haptics",
"Virtual Reality",
"Controller",
"X 0023 K 6 1 Management Of Computing And Information Systems",
"Project And People Management",
"Life Cycle",
"K 7 M The Computing Profession",
"Miscellaneous Ethics"
],
"authors": [
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Mike Sinclair",
"givenName": "Mike",
"surname": "Sinclair",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Eyal Ofek",
"givenName": "Eyal",
"surname": "Ofek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Christian Holz",
"givenName": "Christian",
"surname": "Holz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Inrak Choi",
"givenName": "Inrak",
"surname": "Choi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Eric Whitmire",
"givenName": "Eric",
"surname": "Whitmire",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Evan Strasnick",
"givenName": "Evan",
"surname": "Strasnick",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Hrvoje Benko",
"givenName": "Hrvoje",
"surname": "Benko",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "777-778",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446160",
"articleId": "13bd1fZBGbH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446506",
"articleId": "13bd1tMztYs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892308",
"title": "A haptic three-dimensional shape display with three fingers grasping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892308/12OmNvA1hFe",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892328",
"title": "Effect on high versus low fidelity haptic feedback in a virtual reality baseball simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892328/12OmNym2bPM",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a083",
"title": "Tapping with a Handheld Stick in VR: Redirection Detection Thresholds for Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a083/1CJcjWU39wQ",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a618",
"title": "Retargeting Destinations of Passive Props for Enhancing Haptic Feedback in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a618/1CJeVmWfgWc",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a846",
"title": "AmbientTransfer: Presence Enhancement by Converting Video Ambient to Users' Somatosensory Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a846/1CJeeimnzmU",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797918",
"title": "Virtual Reality Training with Passive Haptic Feedback for CryoEM Sample Preparation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797918/1cJ14ZjqmCQ",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797956",
"title": "Haptic Compass: Active Vibrotactile Feedback of Physical Object for Path Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797956/1cJ17BLEK88",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a575",
"title": "The Effect of the Virtual Object Size on Weight Perception Augmented with Pseudo-Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a575/1tnWwW9JGXC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a183",
"title": "Understanding Emotional Expression with Haptic Feedback Vest Patterns and Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a183/1tnX9YpX3Nu",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a201",
"title": "Haptic-Enabled Buttons Through Adaptive Trigger Resistance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a201/1tnXoCxhKgw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0LTyanRu",
"doi": "10.1109/VR.2019.8797869",
"title": "Feel the Globe: Enhancing the Perception of Immersive Spherical Visualizations with Tangible Proxies",
"normalizedTitle": "Feel the Globe: Enhancing the Perception of Immersive Spherical Visualizations with Tangible Proxies",
"abstract": "Recent developments in the commercialization of virtual reality open up many opportunities for enhancing human interaction with three-dimensional objects and visualizations. Spherical visualizations allow for convenient exploration of certain types of data. Our tangible sphere, exactly aligned with the sphere visualizations shown in VR, implements a very natural way of interaction and utilizes senses and skills trained in the real world. In a lab study, we investigate the effects of the perception of actually holding a virtual spherical visualization in hands. As use cases, we focus on surface visualizations that benefit from or require a rounded shape. We compared the usage of two differently sized acrylic glass spheres to a related interaction technique that utilizes VR controllers as proxies. On the one hand, our work is motivated by the ability to create in VR a tangible, lightweight, handheld spherical display that can hardly be realized in reality. On the other hand, gaining insights about the impact of a fully tangible embodiment of a virtual object on task performance, comprehension of patterns, and user behavior is important in its own right. After a description of the implementation we discuss the advantages and disadvantages of our approach, taking into account different handheld spherical displays utilizing outside and inside projection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent developments in the commercialization of virtual reality open up many opportunities for enhancing human interaction with three-dimensional objects and visualizations. Spherical visualizations allow for convenient exploration of certain types of data. Our tangible sphere, exactly aligned with the sphere visualizations shown in VR, implements a very natural way of interaction and utilizes senses and skills trained in the real world. In a lab study, we investigate the effects of the perception of actually holding a virtual spherical visualization in hands. As use cases, we focus on surface visualizations that benefit from or require a rounded shape. We compared the usage of two differently sized acrylic glass spheres to a related interaction technique that utilizes VR controllers as proxies. On the one hand, our work is motivated by the ability to create in VR a tangible, lightweight, handheld spherical display that can hardly be realized in reality. On the other hand, gaining insights about the impact of a fully tangible embodiment of a virtual object on task performance, comprehension of patterns, and user behavior is important in its own right. After a description of the implementation we discuss the advantages and disadvantages of our approach, taking into account different handheld spherical displays utilizing outside and inside projection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent developments in the commercialization of virtual reality open up many opportunities for enhancing human interaction with three-dimensional objects and visualizations. Spherical visualizations allow for convenient exploration of certain types of data. Our tangible sphere, exactly aligned with the sphere visualizations shown in VR, implements a very natural way of interaction and utilizes senses and skills trained in the real world. In a lab study, we investigate the effects of the perception of actually holding a virtual spherical visualization in hands. As use cases, we focus on surface visualizations that benefit from or require a rounded shape. We compared the usage of two differently sized acrylic glass spheres to a related interaction technique that utilizes VR controllers as proxies. On the one hand, our work is motivated by the ability to create in VR a tangible, lightweight, handheld spherical display that can hardly be realized in reality. On the other hand, gaining insights about the impact of a fully tangible embodiment of a virtual object on task performance, comprehension of patterns, and user behavior is important in its own right. After a description of the implementation we discuss the advantages and disadvantages of our approach, taking into account different handheld spherical displays utilizing outside and inside projection.",
"fno": "08797869",
"keywords": [
"Human Computer Interaction",
"Virtual Reality",
"Immersive Spherical Visualizations",
"Tangible Proxies",
"Virtual Reality",
"Human Interaction",
"Three Dimensional Objects",
"Tangible Sphere",
"Sphere Visualizations",
"Virtual Spherical Visualization",
"Surface Visualizations",
"Rounded Shape",
"VR Controllers",
"Tangible Display",
"Lightweight Display",
"Fully Tangible Embodiment",
"Virtual Object",
"Handheld Spherical Displays",
"Visualization",
"Three Dimensional Displays",
"Data Visualization",
"Hardware",
"Shape",
"Layout",
"Glass",
"Human Centered Computing X 2014 Interaction Paradigms X 2014 Virtual Reality"
],
"authors": [
{
"affiliation": "LMU Munich, Munich, Germany",
"fullName": "David Englmeier",
"givenName": "David",
"surname": "Englmeier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LMU Munich, Munich, Germany",
"fullName": "Isabel Schönewald",
"givenName": "Isabel",
"surname": "Schönewald",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LMU Munich, Munich, Germany",
"fullName": "Andreas Butz",
"givenName": "Andreas",
"surname": "Butz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California Santa Barbara, Santa Barbara, California",
"fullName": "Tobias Höllerer",
"givenName": "Tobias",
"surname": "Höllerer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1693-1698",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798038",
"articleId": "1cJ1eVQaSKQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797684",
"articleId": "1cJ0PpHt2HC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836500",
"title": "A Tangible Volume for Portable 3D Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836500/12OmNCm7BGU",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2001/1004/0/10040031",
"title": "3D Shape Approximants via Spherical Wavelet Decompositions",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2001/10040031/12OmNqGA5az",
"parentPublication": {
"id": "proceedings/cbms/2001/1004/0",
"title": "Proceedings 14th IEEE Symposium on Computer-Based Medical Systems. CBMS 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549425",
"title": "Creating 3D Projection on tangible objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549425/12OmNqIzhfj",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156357",
"title": "Spherical layout and rendering methods for immersive graph visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156357/12OmNrFTr4v",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dvis/2014/6826/0/07160099",
"title": "Spatial augmented reality — A tool for 3D data visualization",
"doi": null,
"abstractUrl": "/proceedings-article/3dvis/2014/07160099/12OmNxxvAN7",
"parentPublication": {
"id": "proceedings/3dvis/2014/6826/0",
"title": "2014 IEEE VIS International Workshop on 3DVis (3DVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476611",
"title": "Poster: Tangible Controllers for 3D Widgets",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476611/12OmNzBOi9H",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a812",
"title": "Tangiball: Foot-Enabled Embodied Tangible Interaction with a Ball in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a812/1CJczvrAl0Y",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797887",
"title": "Sphere in Hand: Exploring Tangible Interaction with Immersive Spherical Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797887/1cJ15YywtBS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089453",
"title": "A Tangible Spherical Proxy for Object Manipulation in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089453/1jIxguSW9va",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a345",
"title": "Spherical World in Miniature: Exploring the Tiny Planets Metaphor for Discrete Locomotion in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a345/1tuAuPBgHTi",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1grOiRpGmv6",
"title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1grOkHlmimA",
"doi": "10.1109/AIVR46125.2019.00035",
"title": "Situation-Adaptive Object Grasping Recognition in VR Environment",
"normalizedTitle": "Situation-Adaptive Object Grasping Recognition in VR Environment",
"abstract": "In this paper, we propose a method for recognizing grasping of virtual objects in VR environment. The proposed method utilizes the fact that the position and shape of the virtual object to be grasped are known. A camera acquires an image of the user grasping a virtual object, and the posture of the hand is extracted from that image. The obtained hand posture is used to classify whether it is a grasping action or not. In order to evaluate the proposed method, we created a new dataset that was specialized for grasping virtual objects with a bare hand. There were three shapes and three positions of virtual objects in the dataset. The recognition rate of the classifier that was trained using the dataset with specific shapes of virtual objects was 93.18 %, and that with all the shapes of virtual objects was 87.71 %. This result shows that the recognition rate was improved by training the classifier using the shape-dependent dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a method for recognizing grasping of virtual objects in VR environment. The proposed method utilizes the fact that the position and shape of the virtual object to be grasped are known. A camera acquires an image of the user grasping a virtual object, and the posture of the hand is extracted from that image. The obtained hand posture is used to classify whether it is a grasping action or not. In order to evaluate the proposed method, we created a new dataset that was specialized for grasping virtual objects with a bare hand. There were three shapes and three positions of virtual objects in the dataset. The recognition rate of the classifier that was trained using the dataset with specific shapes of virtual objects was 93.18 %, and that with all the shapes of virtual objects was 87.71 %. This result shows that the recognition rate was improved by training the classifier using the shape-dependent dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a method for recognizing grasping of virtual objects in VR environment. The proposed method utilizes the fact that the position and shape of the virtual object to be grasped are known. A camera acquires an image of the user grasping a virtual object, and the posture of the hand is extracted from that image. The obtained hand posture is used to classify whether it is a grasping action or not. In order to evaluate the proposed method, we created a new dataset that was specialized for grasping virtual objects with a bare hand. There were three shapes and three positions of virtual objects in the dataset. The recognition rate of the classifier that was trained using the dataset with specific shapes of virtual objects was 93.18 %, and that with all the shapes of virtual objects was 87.71 %. This result shows that the recognition rate was improved by training the classifier using the shape-dependent dataset.",
"fno": "560400a171",
"keywords": [
"Virtual Reality",
"Machine Learning",
"Action Recognition",
"Grasp Detection"
],
"authors": [
{
"affiliation": "Saitama University",
"fullName": "Koki Hirota",
"givenName": "Koki",
"surname": "Hirota",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saitama University",
"fullName": "Takashi Komuro",
"givenName": "Takashi",
"surname": "Komuro",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-12-01T00:00:00",
"pubType": "proceedings",
"pages": "171-1713",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5604-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "560400a167",
"articleId": "1grOlldItiw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "560400a175",
"articleId": "1grOjh5wPWU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iros/1995/7108/2/71082348",
"title": "Reasoning simplified volumetric shapes for robotic grasping",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082348/12OmNBgz4Bx",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2000/0868/0/08680373",
"title": "Intuitive Virtual Grasping for Non Haptic Environments",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2000/08680373/12OmNBtUdNx",
"parentPublication": {
"id": "proceedings/pg/2000/0868/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410479",
"title": "Virtual grasping for virtual assembly tasks",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410479/12OmNz6iOrG",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciicii/2017/2434/0/2434a047",
"title": "A Combined Texture-Shape Global 3D Feature Descriptor for Object Recognition and Grasping",
"doi": null,
"abstractUrl": "/proceedings-article/iciicii/2017/2434a047/12OmNzVXNZ7",
"parentPublication": {
"id": "proceedings/iciicii/2017/2434/0",
"title": "2017 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446538",
"title": "A Realtime Virtual Grasping System for Manipulating Complex Objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446538/13bd1gzWkQa",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049658",
"title": "Comparing Different Grasping Visualizations for Object Manipulation in VR using Controllers",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049658/1KYotjCVD7W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2022/7260/0/726000a156",
"title": "Efficient Representations of Object Geometry for Reinforcement Learning of Interactive Grasping Policies",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2022/726000a156/1KckfYMs3ra",
"parentPublication": {
"id": "proceedings/irc/2022/7260/0",
"title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798155",
"title": "Grasping objects in immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798155/1cJ0SxJIrrW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a749",
"title": "Freehand Grasping: An Analysis of Grasping for Docking Tasks in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a749/1tuAgrCerNC",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a391",
"title": "A Grasp on Reality: Understanding Grasping Patterns for Object Interaction in Real and Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a391/1yeQOxGvsPK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIx82saxUY",
"doi": "10.1109/VR46266.2020.00046",
"title": "Precise and realistic grasping and manipulation in Virtual Reality without force feedback",
"normalizedTitle": "Precise and realistic grasping and manipulation in Virtual Reality without force feedback",
"abstract": "This paper introduces a physically-based approach of grasping and manipulation regarding virtual objects that would enable fine and stable grasping without haptic force feedback. The main contribution is to enhance an existing method which couples a virtual kinematic hand with a visual hand tracking system. The mismatches between the tracked and virtual hands often yield unstable grasps, especially for small objects. This is overcome by the implementation of grasping assistance based on virtual springs between the tracked and virtual hands. The assistance is triggered based on an analysis of usual grasping criteria, to determine whether a grasp is feasible or not. The proposed method has been validated in a supervised experiment which showed that our assistance improves speed and accuracy for a \"pick and place\" task involving an exhaustive object set, sized for precision grasp. Moreover, users’ feedback shows a clear preference for the present approach in terms of naturalness and efficiency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a physically-based approach of grasping and manipulation regarding virtual objects that would enable fine and stable grasping without haptic force feedback. The main contribution is to enhance an existing method which couples a virtual kinematic hand with a visual hand tracking system. The mismatches between the tracked and virtual hands often yield unstable grasps, especially for small objects. This is overcome by the implementation of grasping assistance based on virtual springs between the tracked and virtual hands. The assistance is triggered based on an analysis of usual grasping criteria, to determine whether a grasp is feasible or not. The proposed method has been validated in a supervised experiment which showed that our assistance improves speed and accuracy for a \"pick and place\" task involving an exhaustive object set, sized for precision grasp. Moreover, users’ feedback shows a clear preference for the present approach in terms of naturalness and efficiency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a physically-based approach of grasping and manipulation regarding virtual objects that would enable fine and stable grasping without haptic force feedback. The main contribution is to enhance an existing method which couples a virtual kinematic hand with a visual hand tracking system. The mismatches between the tracked and virtual hands often yield unstable grasps, especially for small objects. This is overcome by the implementation of grasping assistance based on virtual springs between the tracked and virtual hands. The assistance is triggered based on an analysis of usual grasping criteria, to determine whether a grasp is feasible or not. The proposed method has been validated in a supervised experiment which showed that our assistance improves speed and accuracy for a \"pick and place\" task involving an exhaustive object set, sized for precision grasp. Moreover, users’ feedback shows a clear preference for the present approach in terms of naturalness and efficiency.",
"fno": "09089499",
"keywords": [
"Grasping",
"Couplings",
"Visualization",
"Kinematics",
"Task Analysis",
"Virtual Reality",
"Robustness",
"Human Centered Computing Virtual Reality X 2014 Virtual Grasping",
"Human Centered Computing User Interface Design X 2014 Dexterous Interaction Precision Grasp",
"Human Centered Computing User Studies"
],
"authors": [
{
"affiliation": "CEA LIST,Interactive Simulation Laboratory,Palaiseau,F-91120",
"fullName": "Thibauld DELRIEU",
"givenName": "Thibauld",
"surname": "DELRIEU",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CEA LIST,Interactive Simulation Laboratory,Palaiseau,F-91120",
"fullName": "Vincent Weistroffer",
"givenName": "Vincent",
"surname": "Weistroffer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Université de Poitiers -CNRS - ENSMA,Institut PPRIME",
"fullName": "Jean Pierre Gazeau",
"givenName": "Jean Pierre",
"surname": "Gazeau",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "266-274",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089455",
"articleId": "1jIxaiP5XX2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089474",
"articleId": "1jIx8JYL1wk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549355",
"title": "Nonuniform and adaptive coupling stiffness for virtual grasping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549355/12OmNB836F9",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2013/2246/0/2246a369",
"title": "Generation of Grasping Poses for Object Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a369/12OmNBAqZIy",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a084",
"title": "Analysis of Medium Wrap Freehand Virtual Object Grasping in Exocentric Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a084/12OmNCfSqMX",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2012/1204/0/06184182",
"title": "Visual interpenetration tradeoffs in whole-hand virtual grasping",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184182/12OmNvA1hj6",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492758",
"title": "Realistic virtual grasping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492758/12OmNyfdOSU",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410479",
"title": "Virtual grasping for virtual assembly tasks",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410479/12OmNz6iOrG",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/04/09084269",
"title": "Evaluation of Drop Shadows for Virtual Object Grasping in Augmented Reality",
"doi": null,
"abstractUrl": "/magazine/cg/2020/04/09084269/1jtyNfWJwoo",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1441",
"title": "GraspNet-1Billion: A Large-Scale Benchmark for General Object Grasping",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1441/1m3ofIU41i0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a749",
"title": "Freehand Grasping: An Analysis of Grasping for Docking Tasks in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a749/1tuAgrCerNC",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a391",
"title": "A Grasp on Reality: Understanding Grasping Patterns for Object Interaction in Real and Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a391/1yeQOxGvsPK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBubORS",
"title": "2015 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwpGgGH",
"doi": "10.1109/CW.2015.46",
"title": "A Method of Touching and Moving Virtual Shadows with Real Shadows",
"normalizedTitle": "A Method of Touching and Moving Virtual Shadows with Real Shadows",
"abstract": "In this research, we propose a system to touch and move virtual shadows with real shadows. Shadows are easily generated with physical objects and a light source, and they are also used for entertainment such as shadow play and shadow art. In our method, the system scans physical objects in front of a light source, generates virtual shadows according to the scan data, and superimposes the virtual shadows to real shadows of the physical objects. The virtual shadows can be changed interactively according to the change of the real shadows, and it would realize a new type of entertainment that uses shadows. Our system scans the 3D shapes and the 3D positions of physical objects, and simulates shadows of the physical objects projected on the screen. Thus the positions and the sizes of physical objects for shadows are not limited. Virtual shadows are synthesized and moved based on the simulated shadows, and they are superimposed to real shadows of the physical objects. The user can feel as if interacting with virtual shadows by real shadows.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this research, we propose a system to touch and move virtual shadows with real shadows. Shadows are easily generated with physical objects and a light source, and they are also used for entertainment such as shadow play and shadow art. In our method, the system scans physical objects in front of a light source, generates virtual shadows according to the scan data, and superimposes the virtual shadows to real shadows of the physical objects. The virtual shadows can be changed interactively according to the change of the real shadows, and it would realize a new type of entertainment that uses shadows. Our system scans the 3D shapes and the 3D positions of physical objects, and simulates shadows of the physical objects projected on the screen. Thus the positions and the sizes of physical objects for shadows are not limited. Virtual shadows are synthesized and moved based on the simulated shadows, and they are superimposed to real shadows of the physical objects. The user can feel as if interacting with virtual shadows by real shadows.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this research, we propose a system to touch and move virtual shadows with real shadows. Shadows are easily generated with physical objects and a light source, and they are also used for entertainment such as shadow play and shadow art. In our method, the system scans physical objects in front of a light source, generates virtual shadows according to the scan data, and superimposes the virtual shadows to real shadows of the physical objects. The virtual shadows can be changed interactively according to the change of the real shadows, and it would realize a new type of entertainment that uses shadows. Our system scans the 3D shapes and the 3D positions of physical objects, and simulates shadows of the physical objects projected on the screen. Thus the positions and the sizes of physical objects for shadows are not limited. Virtual shadows are synthesized and moved based on the simulated shadows, and they are superimposed to real shadows of the physical objects. The user can feel as if interacting with virtual shadows by real shadows.",
"fno": "9403a359",
"keywords": [
"Three Dimensional Displays",
"Image Color Analysis",
"Light Sources",
"Entertainment Industry",
"Art",
"Cameras",
"Shape",
"Kinect",
"Interaction",
"Shadow",
"3 DCG"
],
"authors": [
{
"affiliation": null,
"fullName": "Hiroko Iwasaki",
"givenName": "Hiroko",
"surname": "Iwasaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Momoko Kondo",
"givenName": "Momoko",
"surname": "Kondo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rei Ito",
"givenName": "Rei",
"surname": "Ito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Saya Sugiura",
"givenName": "Saya",
"surname": "Sugiura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuka Oba",
"givenName": "Yuka",
"surname": "Oba",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shinji Mizuno",
"givenName": "Shinji",
"surname": "Mizuno",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "359-360",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9403-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9403a355",
"articleId": "12OmNAkEU4r",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9403a361",
"articleId": "12OmNzd7byt",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2001/1195/0/11950595",
"title": "Generating Dynamic Shadows for Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2001/11950595/12OmNvjyxVL",
"parentPublication": {
"id": "proceedings/iv/2001/1195/0",
"title": "Proceedings Fifth International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2013/2246/0/2246a124",
"title": "Screen Space Anisotropic Blurred Soft Shadows by Efficient Separable Filtering Method",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a124/12OmNwdtwco",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a192",
"title": "[POSTER] Illumination Estimation Using Cast Shadows for Realistic Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a192/12OmNxX3uLh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2015/9628/0/9628a053",
"title": "The Use of Shadows on Real Floor as a Depth Correction of Stereoscopically Visualized Virtual Objects",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a053/12OmNyQYtrM",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2015/9628/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450403",
"title": "Generate Accurate Soft Shadows Using Complete Occluder Buffer",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450403/12OmNyqiaRP",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446052",
"title": "Casting Virtual Shadows Based on Brightness Induction for Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446052/13bd1hyoTyc",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875905",
"title": "Low-Pass Filtered Volumetric Shadows",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875905/13rRUy2YLYx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049677",
"title": "ShadowMover: Automatically Projecting Real Shadows onto Virtual Object",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049677/1KYooKy1LAQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798245",
"title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798245/1cI6ar8DdyE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798049",
"title": "Shadow Inducers: Inconspicuous Highlights for Casting Virtual Shadows on OST-HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798049/1cJ0UaezhG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0MWKVYUb6",
"doi": "10.1109/CVPR52688.2022.00418",
"title": "Face Relighting with Geometrically Consistent Shadows",
"normalizedTitle": "Face Relighting with Geometrically Consistent Shadows",
"abstract": "Most face relighting methods are able to handle diffuse shadows, but struggle to handle hard shadows, such as those cast by the nose. Methods that propose techniques for handling hard shadows often do not produce geometrically consistent shadows since they do not directly leverage the estimated face geometry while synthesizing them. We propose a novel differentiable algorithm for synthesizing hard shadows based on ray tracing, which we incorporate into training our face relighting model. Our proposed algorithm directly utilizes the estimated face geometry to synthesize geometrically consistent hard shadows. We demonstrate through quantitative and qualitative experiments on Multi-PIE and FFHQ that our method produces more geometrically consistent shadows than previous face relighting methods while also achieving state-of-the-art face relighting performance under directional lighting. In addition, we demonstrate that our differentiable hard shadow modeling improves the quality of the estimated face geometry over diffuse shading models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most face relighting methods are able to handle diffuse shadows, but struggle to handle hard shadows, such as those cast by the nose. Methods that propose techniques for handling hard shadows often do not produce geometrically consistent shadows since they do not directly leverage the estimated face geometry while synthesizing them. We propose a novel differentiable algorithm for synthesizing hard shadows based on ray tracing, which we incorporate into training our face relighting model. Our proposed algorithm directly utilizes the estimated face geometry to synthesize geometrically consistent hard shadows. We demonstrate through quantitative and qualitative experiments on Multi-PIE and FFHQ that our method produces more geometrically consistent shadows than previous face relighting methods while also achieving state-of-the-art face relighting performance under directional lighting. In addition, we demonstrate that our differentiable hard shadow modeling improves the quality of the estimated face geometry over diffuse shading models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most face relighting methods are able to handle diffuse shadows, but struggle to handle hard shadows, such as those cast by the nose. Methods that propose techniques for handling hard shadows often do not produce geometrically consistent shadows since they do not directly leverage the estimated face geometry while synthesizing them. We propose a novel differentiable algorithm for synthesizing hard shadows based on ray tracing, which we incorporate into training our face relighting model. Our proposed algorithm directly utilizes the estimated face geometry to synthesize geometrically consistent hard shadows. We demonstrate through quantitative and qualitative experiments on Multi-PIE and FFHQ that our method produces more geometrically consistent shadows than previous face relighting methods while also achieving state-of-the-art face relighting performance under directional lighting. In addition, we demonstrate that our differentiable hard shadow modeling improves the quality of the estimated face geometry over diffuse shading models.",
"fno": "6.946E212",
"keywords": [
"Face Recognition",
"Geometry",
"Lighting",
"Ray Tracing",
"Solid Modelling",
"Diffuse Shadows",
"Geometrically Consistent Shadows",
"Estimated Face Geometry",
"Face Relighting Model",
"Geometrically Consistent Hard Shadows",
"Previous Face Relighting Methods",
"State Of The Art Face Relighting Performance",
"Differentiable Hard Shadow Modeling",
"Geometry",
"Training",
"Shape",
"Computational Modeling",
"Face Recognition",
"Surveillance",
"Lighting"
],
"authors": [
{
"affiliation": "Michigan State University",
"fullName": "Andrew Hou",
"givenName": "Andrew",
"surname": "Hou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qualcomm Technologies Inc.",
"fullName": "Michel Sarkis",
"givenName": "Michel",
"surname": "Sarkis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qualcomm Technologies Inc.",
"fullName": "Ning Bi",
"givenName": "Ning",
"surname": "Bi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Michigan State University",
"fullName": "Yiying Tong",
"givenName": "Yiying",
"surname": "Tong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Michigan State University",
"fullName": "Xiaoming Liu",
"givenName": "Xiaoming",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4207-4216",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0MWFKEYW4",
"name": "pcvpr202269460-09880331s1-mm_694600e207.zip",
"size": "16.1 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880331s1-mm_694600e207.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600e197",
"articleId": "1H1ie5kSXM4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600e217",
"articleId": "1H1kQpBVCZW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460513",
"title": "Illumination normalization of face images with cast shadows",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460513/12OmNBQ2VPw",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/5/01327215",
"title": "Face relighting for face recognition under generic illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01327215/12OmNvDqsKV",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/5",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icapr/2009/3520/0/3520a437",
"title": "Eigen-domain Relighting of Face Images for Illumination-invariant Face Verification",
"doi": null,
"abstractUrl": "/proceedings-article/icapr/2009/3520a437/12OmNya72vy",
"parentPublication": {
"id": "proceedings/icapr/2009/3520/0",
"title": "Advances in Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e625",
"title": "InverseFaceNet: Deep Monocular Inverse Face Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e625/17D45XdBRQT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500c146",
"title": "Illumination-Invariant Face Recognition With Deep Relit Face Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500c146/18j8HRRf584",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2022/5824/0/582400a719",
"title": "Temporally Consistent Relighting for Portrait Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2022/582400a719/1B12CyAdcMo",
"parentPublication": {
"id": "proceedings/wacvw/2022/5824/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2799",
"title": "Towards High Fidelity Monocular Face Reconstruction with Rich Reflectance using Self-supervised Learning and Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2799/1BmJb3RcOGY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h193",
"title": "Deep Single-Image Portrait Relighting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h193/1hQqn2MnaNO",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f123",
"title": "Learning Physics-Guided Face Relighting Under Directional Light",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f123/1m3nFFWN09q",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900o4714",
"title": "Towards High Fidelity Face Relighting with Realistic Shadows",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900o4714/1yeMcN60KoU",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKisK",
"title": "2018 17th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"acronym": "sbgames",
"groupId": "1800056",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WK5AqA",
"doi": "10.1109/SBGAMES.2018.00017",
"title": "Controlling First-Person Character Movement: A Low-Cost Camera-based Tracking Alternative for Virtual Reality",
"normalizedTitle": "Controlling First-Person Character Movement: A Low-Cost Camera-based Tracking Alternative for Virtual Reality",
"abstract": "Virtual reality (VR) gaming is a billion-dollar industry that aims at providing a more realistic and exciting immersive experience in computer-simulated environment. Besides hardware capabilities and high-quality 3-D graphics, the feeling of immersion is augmented by incorporating realistic player-game interactions, including the control of game characters movements based on real/natural movements of the player and objects. To this intent, modern VR gaming usually leverages virtual reality headsets and additional devices/sensors whose costs are normally expensive for ordinary games users. In this paper, we propose a low-cost VR gaming system in which the smartphone is used as display and processing units, and its rear camera is used as the main sensor for controlling the character movements in a first-person shooter game (FPS) developed in this work. The VR game system can operate, on average, at 31 frames per second. Regarding the usability, 19 of 24 interviewed users evaluated the character movement precision as good or very good, while 95.8% considered the gun movement as good or very good. Also, all the users would recommend the movement control system as a real-virtual interaction alternative for VR gaming.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality (VR) gaming is a billion-dollar industry that aims at providing a more realistic and exciting immersive experience in computer-simulated environment. Besides hardware capabilities and high-quality 3-D graphics, the feeling of immersion is augmented by incorporating realistic player-game interactions, including the control of game characters movements based on real/natural movements of the player and objects. To this intent, modern VR gaming usually leverages virtual reality headsets and additional devices/sensors whose costs are normally expensive for ordinary games users. In this paper, we propose a low-cost VR gaming system in which the smartphone is used as display and processing units, and its rear camera is used as the main sensor for controlling the character movements in a first-person shooter game (FPS) developed in this work. The VR game system can operate, on average, at 31 frames per second. Regarding the usability, 19 of 24 interviewed users evaluated the character movement precision as good or very good, while 95.8% considered the gun movement as good or very good. Also, all the users would recommend the movement control system as a real-virtual interaction alternative for VR gaming.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality (VR) gaming is a billion-dollar industry that aims at providing a more realistic and exciting immersive experience in computer-simulated environment. Besides hardware capabilities and high-quality 3-D graphics, the feeling of immersion is augmented by incorporating realistic player-game interactions, including the control of game characters movements based on real/natural movements of the player and objects. To this intent, modern VR gaming usually leverages virtual reality headsets and additional devices/sensors whose costs are normally expensive for ordinary games users. In this paper, we propose a low-cost VR gaming system in which the smartphone is used as display and processing units, and its rear camera is used as the main sensor for controlling the character movements in a first-person shooter game (FPS) developed in this work. The VR game system can operate, on average, at 31 frames per second. Regarding the usability, 19 of 24 interviewed users evaluated the character movement precision as good or very good, while 95.8% considered the gun movement as good or very good. Also, all the users would recommend the movement control system as a real-virtual interaction alternative for VR gaming.",
"fno": "960500a067",
"keywords": [
"Computer Games",
"Smart Phones",
"Virtual Reality",
"First Person Character Movement",
"Virtual Reality Gaming",
"Exciting Immersive Experience",
"Computer Simulated Environment",
"Realistic Player Game Interactions",
"Modern VR Gaming",
"Virtual Reality Headsets",
"Low Cost VR Gaming System",
"First Person Shooter Game",
"VR Game System",
"Character Movement Precision",
"Gun Movement",
"Movement Control System",
"Real Virtual Interaction Alternative",
"Low Cost Camera Based Tracking Alternative",
"Game Character Movements",
"Smartphone",
"Games",
"Weapons",
"Target Tracking",
"Headphones",
"Cameras",
"Bluetooth",
"Virtual Reality Gaming",
"Character Movement Controller",
"Camera Based Tracking",
"First Person Shooter Game"
],
"authors": [
{
"affiliation": null,
"fullName": "Rodrigo F. Berriel",
"givenName": "Rodrigo F.",
"surname": "Berriel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thiago M. Paixão",
"givenName": "Thiago M.",
"surname": "Paixão",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ivo Nicchio",
"givenName": "Ivo",
"surname": "Nicchio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thiago Oliveira-Santos",
"givenName": "Thiago",
"surname": "Oliveira-Santos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sbgames",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "67-74",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9605-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "960500a057",
"articleId": "17D45WaTkkT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "960500a075",
"articleId": "17D45Vw15xF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vs-games/2018/7123/0/08493414",
"title": "Comparison of Teleportation and Fixed Track Driving in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493414/14tNJnrhcIw",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a161",
"title": "Intelligent Wearable Virtual Reality (VR) Gaming Controller for People with Motor Disabilities",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a161/17D45XDIXOo",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09677963",
"title": "Survey of Movement Reproduction in Immersive Virtual Rehabilitation",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09677963/1A4SqmEsrhm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a574",
"title": "Enabling Virtual Reality Interactions in Confined Spaces by Re-Associating Finger Motions",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a574/1CJeyGjSZCo",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a530",
"title": "The Evaluation of Gait-Free Locomotion Methods with Eye Movement in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a530/1J7WtHqguHu",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a317",
"title": "WriArm: Leveraging Wrist Movement to Design Wrist+Arm Based Teleportation in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a317/1JrRkBbpP1K",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049667",
"title": "ConeSpeech: Exploring Directional Speech Interaction for Multi-Person Remote Communication in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049667/1KYoqrkz9zq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090616",
"title": "Developing a VR tool for studying pedestrian movement and choice behavior",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090616/1jIxA2Yom1G",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09408400",
"title": "My Virtual Self: The Role of Movement in Children's Sense of Embodiment",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09408400/1sVEPfq6BBC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a528",
"title": "VXSlate: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a528/1tnXg447e7e",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1AZOIZvl29y",
"title": "2021 IEEE 7th International Conference on Collaboration and Internet Computing (CIC)",
"acronym": "cic",
"groupId": "1001767",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AZOK8jGMZq",
"doi": "10.1109/CIC52973.2021.00012",
"title": "A Collaborative and Adaptive Feedback System for Physical Exercises",
"normalizedTitle": "A Collaborative and Adaptive Feedback System for Physical Exercises",
"abstract": "Maintaining motivation to meet physical exercise goals is a big challenge in virtual/home-based exercise guidance systems. Lack of motivation, long-maintained bad daily routines, and fear of injury are some of the reasons that cause this hesitation. This paper proposes a reinforcement learning-based virtual exercise assistant capable of providing encouragement and customized feedback on body movement form over time. Repeated arm curls were observed and tracked using single and dual-camera systems using the Posenet pose estimation library. To accumulate enough experience across individuals, the reinforcement learning model was collaboratively trained by subjects. The proposed system is tested on 36 subjects. Behavioral changes are apparent in 31 of the 36 subjects, with 31 subjects reducing movement errors over time and 15 subjects completely eliminating the errors. The system was analyzed for which types of feedback provided the highest expected value, and feedback directly related to the previous mistake provided the highest valued feedback (<tex>$p < 0.0133$</tex>). The result showed that the Reinforcement Learning system provides meaningful feedback and positively impacts behavior progress.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Maintaining motivation to meet physical exercise goals is a big challenge in virtual/home-based exercise guidance systems. Lack of motivation, long-maintained bad daily routines, and fear of injury are some of the reasons that cause this hesitation. This paper proposes a reinforcement learning-based virtual exercise assistant capable of providing encouragement and customized feedback on body movement form over time. Repeated arm curls were observed and tracked using single and dual-camera systems using the Posenet pose estimation library. To accumulate enough experience across individuals, the reinforcement learning model was collaboratively trained by subjects. The proposed system is tested on 36 subjects. Behavioral changes are apparent in 31 of the 36 subjects, with 31 subjects reducing movement errors over time and 15 subjects completely eliminating the errors. The system was analyzed for which types of feedback provided the highest expected value, and feedback directly related to the previous mistake provided the highest valued feedback (<tex>$p < 0.0133$</tex>). The result showed that the Reinforcement Learning system provides meaningful feedback and positively impacts behavior progress.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Maintaining motivation to meet physical exercise goals is a big challenge in virtual/home-based exercise guidance systems. Lack of motivation, long-maintained bad daily routines, and fear of injury are some of the reasons that cause this hesitation. This paper proposes a reinforcement learning-based virtual exercise assistant capable of providing encouragement and customized feedback on body movement form over time. Repeated arm curls were observed and tracked using single and dual-camera systems using the Posenet pose estimation library. To accumulate enough experience across individuals, the reinforcement learning model was collaboratively trained by subjects. The proposed system is tested on 36 subjects. Behavioral changes are apparent in 31 of the 36 subjects, with 31 subjects reducing movement errors over time and 15 subjects completely eliminating the errors. The system was analyzed for which types of feedback provided the highest expected value, and feedback directly related to the previous mistake provided the highest valued feedback (-). The result showed that the Reinforcement Learning system provides meaningful feedback and positively impacts behavior progress.",
"fno": "162500a011",
"keywords": [
"Biomechanics",
"Feedback",
"Groupware",
"Humanities",
"Image Motion Analysis",
"Pose Estimation",
"Reinforcement Learning",
"Physical Exercise Goals",
"Long Maintained Bad Daily Routines",
"Reinforcement Learning Based Virtual Exercise Assistant",
"Body Movement Form",
"Repeated Arm Curls",
"Dual Camera Systems",
"Movement Errors",
"Highest Valued Feedback",
"Adaptive Feedback System",
"Posenet Pose Estimation Library",
"Q Learning",
"Tracking",
"Pose Estimation",
"Collaboration",
"Muscles",
"Real Time Systems",
"Libraries",
"Reinforcement Learning",
"Distributed Machine Learning",
"Human Computer Interaction",
"Pose Estimation"
],
"authors": [
{
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"fullName": "Ishan Ranasinghe",
"givenName": "Ishan",
"surname": "Ranasinghe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"fullName": "Chengping Yuan",
"givenName": "Chengping",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"fullName": "Ram Dantu",
"givenName": "Ram",
"surname": "Dantu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"fullName": "Mark V. Albert",
"givenName": "Mark V.",
"surname": "Albert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cic",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "11-15",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1625-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "162500a001",
"articleId": "1AZOKmmy83m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "162500a016",
"articleId": "1AZOKIafXsA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344578",
"title": "Pain level recognition using kinematics and muscle activity for physical rehabilitation in chronic pain",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344578/12OmNrJiCRg",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eecs/2017/2085/0/2085a484",
"title": "A Study on the Effect of Complex Rehabilitation Exericise Methods through Short-Term Active Sling Exercises and Vibration Balls on the Upper Limb Muscle Activity of People with Myelopathy",
"doi": null,
"abstractUrl": "/proceedings-article/eecs/2017/2085a484/12OmNwc3wvQ",
"parentPublication": {
"id": "proceedings/eecs/2017/2085/0",
"title": "2017 European Conference on Electrical Engineering and Computer Science (EECS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiaiaai/2014/4174/0/06913404",
"title": "Determination of the Optimum Muscle Temperature for Maintaining Work Performance with Attenuation of Heat Stress in Human",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913404/12OmNweBUNs",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2003/1890/0/18900159",
"title": "Feedback Distortion for Rehabilitation: Gauging Perceived Physical Effort",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2003/18900159/12OmNzw8iZz",
"parentPublication": {
"id": "proceedings/haptics/2003/1890/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07457685",
"title": "Pseudo-Haptic Feedback in Teleoperation",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07457685/13rRUyYjK5o",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2017/4338/0/07917531",
"title": "Trainwear: A real-time assisted training feedback system with fabric wearable sensors",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2017/07917531/19wAHOhA9yM",
"parentPublication": {
"id": "proceedings/percom-workshops/2017/4338/0",
"title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cphs/2022/8203/0/820300a013",
"title": "Short Stick Exercise Tracking System for Elderly Rehabilitation using IMU Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/cphs/2022/820300a013/1Eyj7eNdRVS",
"parentPublication": {
"id": "proceedings/cphs/2022/8203/0",
"title": "2022 2nd International Workshop on Cyber-Physical-Human System Design and Implementation (CPHS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2019/2286/0/228600a507",
"title": "LOWER-LIMB FOLLOW-UP: A Surface Electromyography Based Serious Computer Game and Patient Follow-Up System for Lower Extremity Muscle Strengthening Exercises in Physiotherapy and Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2019/228600a507/1cdO1onj03K",
"parentPublication": {
"id": "proceedings/cbms/2019/2286/0",
"title": "2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2020/4657/0/09127379",
"title": "W8-Scope: Fine-Grained, Practical Monitoring of Weight Stack-based Exercises",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2020/09127379/1l3yMDI0bAY",
"parentPublication": {
"id": "proceedings/percom/2020/4657/0",
"title": "2020 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2021/8934/0/893400b589",
"title": "HackEd: A Pedagogical Analysis of Online Vulnerability Discovery Exercises",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2021/893400b589/1t0x9tbe4mY",
"parentPublication": {
"id": "proceedings/sp/2021/8934/0",
"title": "2021 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": [
{
"id": "1EOGqWLzSFy",
"videoExt": "mp4",
"videoType": {
"featured": false,
"recommended": false,
"sponsored": false,
"__typename": "VideoTypesType"
},
"article": {
"id": "1AZOK8jGMZq",
"fno": "162500a011",
"issueNum": null,
"pubType": "proceedings",
"volume": "0",
"year": "2021",
"idPrefix": "cic",
"doi": "10.1109/CIC52973.2021.00012",
"title": "A Collaborative and Adaptive Feedback System for Physical Exercises",
"__typename": "ArticleType"
},
"channel": {
"id": "1EOGq37DbLq",
"title": "CIC 2021",
"status": "1",
"featured": false,
"defaultVideoId": "1EOGpsohrbO",
"category": {
"id": "1xvX5lT1WiQ",
"title": "Proceeding",
"type": "proceeding",
"__typename": "VideoCategoryType"
},
"__typename": "VideoChannelType"
},
"year": "2021",
"title": "A Collaborative and Adaptive Feedback System for Physical Exercises",
"description": "Maintaining motivation to meet physical exercise goals is a big challenge in virtual/home-based exercise guidance systems. Lack of motivation, long-maintained bad daily routines, and fear of injury are some of the reasons that cause this hesitation. This paper proposes a reinforcement learning-based virtual exercise assistant capable of providing encouragement and customized feedback on body movement form over time. Repeated arm curls were observed and tracked using single and dual-camera systems using the Posenet pose estimation library. To accumulate enough experience across individuals, the reinforcement learning model was collaboratively trained by subjects. The proposed system is tested on 36 subjects. Behavioral changes are apparent in 31 of the 36 subjects, with 31 subjects reducing movement errors over time and 15 subjects completely eliminating the errors. The system was analyzed for which types of feedback provided the highest expected value, and feedback directly related to the previous mistake provided the highest valued feedback (p < 0.0133). The result showed that the Reinforcement Learning system provides meaningful feedback and positively impacts behavior progress.",
"keywords": [
{
"id": "1EOGriyIvOE",
"title": "Distributed Machine Learning",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1EOGrk9djX2",
"title": "Pose Estimation",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xw6Jk4dUze",
"title": "Reinforcement learning",
"status": "1",
"__typename": "VideoKeywordsType"
},
{
"id": "1xxbiSPrXLq",
"title": "Human-Computer Interaction",
"status": "1",
"__typename": "VideoKeywordsType"
}
],
"speakers": [
{
"firstName": "Ishan",
"lastName": "Ranasinghe",
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"__typename": "SpeakerType"
},
{
"firstName": "Chengping",
"lastName": "Yuan",
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"__typename": "SpeakerType"
},
{
"firstName": "Ram",
"lastName": "Dantu",
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"__typename": "SpeakerType"
},
{
"firstName": "Mark V.",
"lastName": "Albert",
"affiliation": "University of North Texas,Computer Science and Engineering,Denton,TX,USA",
"__typename": "SpeakerType"
}
],
"created": "2022-07-07T00:00:00",
"updated": "2022-07-07T00:00:00",
"imageThumbnailUrl": "thumbnails/1EOGqWLzSFy.jpeg",
"runningTime": "00:17:24",
"aspectRatio": "16:9",
"metrics": {
"views": "0",
"likes": "0",
"__typename": "VideoMetricsType"
},
"notShowInVideoLib": false,
"__typename": "VideoType"
}
]
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJeyGjSZCo",
"doi": "10.1109/VRW55335.2022.00138",
"title": "Enabling Virtual Reality Interactions in Confined Spaces by Re-Associating Finger Motions",
"normalizedTitle": "Enabling Virtual Reality Interactions in Confined Spaces by Re-Associating Finger Motions",
"abstract": "As Virtual Reality (VR) headsets become mobile, people can interact in public places with applications often requiring large arm movements. However, using these open gestures is often uncomfortable and sometimes impossible in confined and public spaces (e.g., commuting in a vehicle). We introduce the concept of finger mapping, re-associating small-scale finger motions onto virtual arms in a larger VR space. Finger mapping supports various interactions (e.g., arms swinging movement, selection, manipulation, and locomotion) when the environment is constrained and does not allow large gestures. Finally, we discuss the opportunities and challenges of using finger mapping for VR interactions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As Virtual Reality (VR) headsets become mobile, people can interact in public places with applications often requiring large arm movements. However, using these open gestures is often uncomfortable and sometimes impossible in confined and public spaces (e.g., commuting in a vehicle). We introduce the concept of finger mapping, re-associating small-scale finger motions onto virtual arms in a larger VR space. Finger mapping supports various interactions (e.g., arms swinging movement, selection, manipulation, and locomotion) when the environment is constrained and does not allow large gestures. Finally, we discuss the opportunities and challenges of using finger mapping for VR interactions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As Virtual Reality (VR) headsets become mobile, people can interact in public places with applications often requiring large arm movements. However, using these open gestures is often uncomfortable and sometimes impossible in confined and public spaces (e.g., commuting in a vehicle). We introduce the concept of finger mapping, re-associating small-scale finger motions onto virtual arms in a larger VR space. Finger mapping supports various interactions (e.g., arms swinging movement, selection, manipulation, and locomotion) when the environment is constrained and does not allow large gestures. Finally, we discuss the opportunities and challenges of using finger mapping for VR interactions.",
"fno": "840200a574",
"keywords": [
"Biomechanics",
"Virtual Reality",
"Finger Mapping",
"VR Interactions",
"Virtual Reality Interactions",
"Confined Spaces",
"Virtual Reality Headsets",
"Public Places",
"Arm Movements",
"Open Gestures",
"Public Spaces",
"Small Scale Finger Motions",
"Virtual Arms",
"VR Space",
"Space Vehicles",
"Headphones",
"Three Dimensional Displays",
"Tracking",
"Conferences",
"Virtual Reality",
"Aerospace Electronics",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Télécom Paris, Institut Polytechnique de Paris",
"fullName": "Wen-Jie Tseng",
"givenName": "Wen-Jie",
"surname": "Tseng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Télécom Paris, Institut Polytechnique de Paris",
"fullName": "Samuel Huron",
"givenName": "Samuel",
"surname": "Huron",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Télécom Paris, Institut Polytechnique de Paris",
"fullName": "Eric Lecolinet",
"givenName": "Eric",
"surname": "Lecolinet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Télécom Paris, Institut Polytechnique de Paris",
"fullName": "Jan Gugenheimer",
"givenName": "Jan",
"surname": "Gugenheimer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "574-575",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJeyCQ9QLm",
"name": "pvrw202284020-09757636s1-mm_840200a574.zip",
"size": "682 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757636s1-mm_840200a574.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a572",
"articleId": "1CJdbRJ9Edi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a576",
"articleId": "1CJd33f4h4k",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icis/2017/5507/0/07960025",
"title": "A flexible finger-mounted airbrush model for immersive freehand painting",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2017/07960025/12OmNBV9Ikp",
"parentPublication": {
"id": "proceedings/icis/2017/5507/0",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798857",
"title": "Poster: Evaluation of a smart tablet's interface for 3D interaction",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798857/12OmNC3Xhrk",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2020/01/08017559",
"title": "A Comparison of Seated and Room-Scale Virtual Reality in a Serious Game for Epidural Preparation",
"doi": null,
"abstractUrl": "/journal/ec/2020/01/08017559/13rRUxBJhBQ",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a404",
"title": "Comparing the Fidelity of Contemporary Pointing with Controller Interactions on Performance of Personal Space Target Selection",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a404/1JrRlimqMKc",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797916",
"title": "DepthMove: Hands-free Interaction in Virtual Reality Using Head Motions in the Depth Dimension",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797916/1cJ0K0zJcv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797989",
"title": "Redirected Jumping: Imperceptibly Manipulating Jump Motions in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797989/1cJ15zHucrC",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2020/7002/0/700200a552",
"title": "airFinger: Micro Finger Gesture Recognition via NIR Light Sensing for Smart Devices",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2020/700200a552/1rsiORLSrio",
"parentPublication": {
"id": "proceedings/icdcs/2020/7002/0",
"title": "2020 IEEE 40th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a761",
"title": "Demonstrating the Use of Rapid Touch Interaction in Virtual Reality for Prolonged Interaction in Productivity Scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a761/1tnX9xsCTVC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a460",
"title": "A Comparison of the Fatigue Progression of Eye-Tracked and Motion-Controlled Interaction in Immersive Space",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a460/1yeD2ZpZzPi",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a251",
"title": "A Japanese Character Flick-Input Interface for Entering Text in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a251/1yfxO7CNnQk",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzBOhX1",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMZpkm",
"doi": "10.1109/ACII.2013.145",
"title": "How to Touch Humans: Guidelines for Social Agents and Robots That Can Touch",
"normalizedTitle": "How to Touch Humans: Guidelines for Social Agents and Robots That Can Touch",
"abstract": "Touch is an essential channel in interpersonal and affective communication, yet most social agents currently lack the capability to touch the user. In this paper we show the credibility of three premises that make the case that providing touch capability to social robots will increase their effectiveness in communicating emotions, building trust and achieving behavioral changes. The first premise is that humans can communicate distinct emotions through touch only, the second is that this is also possible through mediated (virtual) touch, and the third is that social agents can use the same mediated touch technology as effectively as humans. Based on a literature review, we also formulate ten design rules as guidance for the development of social agents that can touch. These rules concern parameters that regulate the meaning of touch cues like context and familiarity, the implicit and explicit meanings of touch, user characteristics, and parameters that can be communicated through affective touch.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Touch is an essential channel in interpersonal and affective communication, yet most social agents currently lack the capability to touch the user. In this paper we show the credibility of three premises that make the case that providing touch capability to social robots will increase their effectiveness in communicating emotions, building trust and achieving behavioral changes. The first premise is that humans can communicate distinct emotions through touch only, the second is that this is also possible through mediated (virtual) touch, and the third is that social agents can use the same mediated touch technology as effectively as humans. Based on a literature review, we also formulate ten design rules as guidance for the development of social agents that can touch. These rules concern parameters that regulate the meaning of touch cues like context and familiarity, the implicit and explicit meanings of touch, user characteristics, and parameters that can be communicated through affective touch.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Touch is an essential channel in interpersonal and affective communication, yet most social agents currently lack the capability to touch the user. In this paper we show the credibility of three premises that make the case that providing touch capability to social robots will increase their effectiveness in communicating emotions, building trust and achieving behavioral changes. The first premise is that humans can communicate distinct emotions through touch only, the second is that this is also possible through mediated (virtual) touch, and the third is that social agents can use the same mediated touch technology as effectively as humans. Based on a literature review, we also formulate ten design rules as guidance for the development of social agents that can touch. These rules concern parameters that regulate the meaning of touch cues like context and familiarity, the implicit and explicit meanings of touch, user characteristics, and parameters that can be communicated through affective touch.",
"fno": "5048a780",
"keywords": [
"Haptic Interfaces",
"Robot Sensing Systems",
"Visualization",
"Context",
"Human Computer Interaction",
"Avatars",
"Emotion",
"Touch",
"Robots",
"Social Agents",
"Haptics",
"Affect"
],
"authors": [
{
"affiliation": "TNO, Soesterberg, Netherlands",
"fullName": "Jan B. F. Van Erp",
"givenName": "Jan B. F.",
"surname": "Van Erp",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TNO, Soesterberg, Netherlands",
"fullName": "Alexander Toet",
"givenName": "Alexander",
"surname": "Toet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-09-01T00:00:00",
"pubType": "proceedings",
"pages": "780-785",
"year": "2013",
"issn": "2156-8103",
"isbn": "978-0-7695-5048-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5048a776",
"articleId": "12OmNxeut2z",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5048a786",
"articleId": "12OmNAq3hLn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344656",
"title": "A warm touch of affect?",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344656/12OmNBIWXCi",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344694",
"title": "Design of a wearable research tool for warm mediated social touches",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344694/12OmNBhHtio",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a769",
"title": "International Workshop on Mediated Touch and Affect (MeTA 2013): Introduction",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a769/12OmNqJq4xv",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811019",
"title": "Virtual Humans That Touch Back: Enhancing Nonverbal Communication with Virtual Humans through Bidirectional Touch",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811019/12OmNwMXnsz",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/03/07811300",
"title": "Social Touch Technology: A Survey of Haptic Technology for Social Touch",
"doi": null,
"abstractUrl": "/journal/th/2017/03/07811300/13rRUxNW1TZ",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2016/02/07161320",
"title": "Design and Evaluation of a Touch-Centered Calming Interaction with a Social Robot",
"doi": null,
"abstractUrl": "/journal/ta/2016/02/07161320/13rRUxly9cg",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2009/03/tth2009030136",
"title": "The Virtual Midas Touch: Helping Behavior After a Mediated Social Touch",
"doi": null,
"abstractUrl": "/journal/th/2009/03/tth2009030136/13rRUygT7n5",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925469",
"title": "Touch Media: Investigating the Effects of Remote Touch on Music-based Emotion Elicitation",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925469/1fHGHYVqCTm",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09258960",
"title": "Touching Virtual Humans: Haptic Responses Reveal the Emotional Impact of Affective Agents",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09258960/1oIW8klCOiY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09444587",
"title": "Receiving a mediated touch from your partner vs. a male stranger: How visual feedback of touch and its sender influence touch experience",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09444587/1u3mvTnIRKo",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMHOkY",
"doi": "10.1109/ISMAR.2014.6948466",
"title": "[Poster] Interacting with your own hands in a fully immersive MR system",
"normalizedTitle": "[Poster] Interacting with your own hands in a fully immersive MR system",
"abstract": "This poster introduces a fully immersive Mixed Reality system we have recently developed, where the user is free to walk inside a virtual scenario while wearing a HMD. The novelty of the system lies in the fact that users can see and use their real hands — by means of a Kinect-like camera mounted on the HMD — in order to naturally interact with the virtual objects. Our working hypothesis are that the introduction of the photorealistic capture of users' hands in a coherently rendered virtual scenario induces in them a strong feeling of presence and embodiment without the need of using a synthetic 3D modelled avatar as a representation of the self. We also argue that the users' ability of grasping and manipulating virtual objects using their own hands not only provides an intuitive interaction experience, but also improves self-perception as well as the perception of the environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This poster introduces a fully immersive Mixed Reality system we have recently developed, where the user is free to walk inside a virtual scenario while wearing a HMD. The novelty of the system lies in the fact that users can see and use their real hands — by means of a Kinect-like camera mounted on the HMD — in order to naturally interact with the virtual objects. Our working hypothesis are that the introduction of the photorealistic capture of users' hands in a coherently rendered virtual scenario induces in them a strong feeling of presence and embodiment without the need of using a synthetic 3D modelled avatar as a representation of the self. We also argue that the users' ability of grasping and manipulating virtual objects using their own hands not only provides an intuitive interaction experience, but also improves self-perception as well as the perception of the environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This poster introduces a fully immersive Mixed Reality system we have recently developed, where the user is free to walk inside a virtual scenario while wearing a HMD. The novelty of the system lies in the fact that users can see and use their real hands — by means of a Kinect-like camera mounted on the HMD — in order to naturally interact with the virtual objects. Our working hypothesis are that the introduction of the photorealistic capture of users' hands in a coherently rendered virtual scenario induces in them a strong feeling of presence and embodiment without the need of using a synthetic 3D modelled avatar as a representation of the self. We also argue that the users' ability of grasping and manipulating virtual objects using their own hands not only provides an intuitive interaction experience, but also improves self-perception as well as the perception of the environment.",
"fno": "06948466",
"keywords": [
"Training",
"Three Dimensional Displays",
"Virtual Environments",
"Cameras",
"Avatars",
"Industries",
"Natural Interaction",
"Mixed Reality",
"Hand Gestures"
],
"authors": [
{
"affiliation": "Scuola Superiore Sant'Anna, CSIRO Computational Informatics",
"fullName": "Franco Tecchia",
"givenName": "Franco",
"surname": "Tecchia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Scuola Superiore Sant'Anna, CSIRO Computational Informatics",
"fullName": "Giovanni Avveduto",
"givenName": "Giovanni",
"surname": "Avveduto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Scuola Superiore Sant'Anna, CSIRO Computational Informatics",
"fullName": "Marcello Carrozzino",
"givenName": "Marcello",
"surname": "Carrozzino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Scuola Superiore Sant'Anna, CSIRO Computational Informatics",
"fullName": "Raffaelo Brondi",
"givenName": "Raffaelo",
"surname": "Brondi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Scuola Superiore Sant'Anna, CSIRO Computational Informatics",
"fullName": "Massimo Bergamasco",
"givenName": "Massimo",
"surname": "Bergamasco",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Scuola Superiore Sant'Anna, CSIRO Computational Informatics",
"fullName": "Leila Alem",
"givenName": "Leila",
"surname": "Alem",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "313-314",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948465",
"articleId": "12OmNxzMnMV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948467",
"articleId": "12OmNrkT7xo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892299",
"title": "Immersive and collaborative Taichi motion learning in various VR environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892299/12OmNCvumOD",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728903",
"title": "Real-time egocentric superimposition of operator's own body on telexistence avatar in virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728903/12OmNrHjqIc",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2018/2290/0/08343267",
"title": "Immersive gesture interfaces for 3D map navigation in HMD-based virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2018/08343267/12OmNvD8Rwt",
"parentPublication": {
"id": "proceedings/icoin/2018/2290/0",
"title": "2018 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504689",
"title": "The impact of a self-avatar on cognitive load in immersive virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504775",
"title": "Evaluation of the effect of a virtual avatar's representation on distance perception in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504775/12OmNwpXROu",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131748",
"title": "Aughanded Virtuality - the hands in the virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131748/12OmNx5piUz",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836524",
"title": "Perceptual Issues of a Passive Haptics Feedback Based MR System",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836524/12OmNxecS4t",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797787",
"title": "The Effect of Hand Size and Interaction Modality on the Virtual Hand Illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797787/1cJ179JUrPa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797797",
"title": "Working Memory Load Performance Based on Collocation of Virtual and Physical Hands",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797797/1cJ1gejsg2Q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089540",
"title": "Influence of Perspective on Dynamic Tasks in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089540/1jIxarbH6AU",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBzRNrw",
"title": "2013 46th Hawaii International Conference on System Sciences",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwekjyJ",
"doi": "10.1109/HICSS.2013.546",
"title": "The Virtual \"Me\" is the Actual Me: Self-Disclosure in Virtual Environment",
"normalizedTitle": "The Virtual \"Me\" is the Actual Me: Self-Disclosure in Virtual Environment",
"abstract": "Considering the importance of self-disclosure in building relationships and bonds, it is vital to investigate how self-disclosure is affected by avatars utilized in many online communities. In this study, we tested a research model that explores how perceived avatar-self similarity affects self-disclosure via different theoretical constructs such as self-awareness, self-presence and identifiability. The research model was empirically tested with data from a web-based survey of 196 Second Life users. Results revealed that avatar similarity impacts self-disclosure but with varying effects, depending on how it is mediated by variables of identifiability, self-awareness and self-presence. Specifically, appearance similarity affects homophily, which leads to heightened self-awareness. This results in increased feelings of self-presence, which positively affects self-disclosure. Homophily also has the effect of heightening perceptions of identifiability, which decreases self-disclosure. Implications and applications are discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Considering the importance of self-disclosure in building relationships and bonds, it is vital to investigate how self-disclosure is affected by avatars utilized in many online communities. In this study, we tested a research model that explores how perceived avatar-self similarity affects self-disclosure via different theoretical constructs such as self-awareness, self-presence and identifiability. The research model was empirically tested with data from a web-based survey of 196 Second Life users. Results revealed that avatar similarity impacts self-disclosure but with varying effects, depending on how it is mediated by variables of identifiability, self-awareness and self-presence. Specifically, appearance similarity affects homophily, which leads to heightened self-awareness. This results in increased feelings of self-presence, which positively affects self-disclosure. Homophily also has the effect of heightening perceptions of identifiability, which decreases self-disclosure. Implications and applications are discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Considering the importance of self-disclosure in building relationships and bonds, it is vital to investigate how self-disclosure is affected by avatars utilized in many online communities. In this study, we tested a research model that explores how perceived avatar-self similarity affects self-disclosure via different theoretical constructs such as self-awareness, self-presence and identifiability. The research model was empirically tested with data from a web-based survey of 196 Second Life users. Results revealed that avatar similarity impacts self-disclosure but with varying effects, depending on how it is mediated by variables of identifiability, self-awareness and self-presence. Specifically, appearance similarity affects homophily, which leads to heightened self-awareness. This results in increased feelings of self-presence, which positively affects self-disclosure. Homophily also has the effect of heightening perceptions of identifiability, which decreases self-disclosure. Implications and applications are discussed.",
"fno": "4892a883",
"keywords": [
"Avatars",
"Second Life",
"Virtual Environments",
"Psychology",
"Loading",
"Educational Institutions",
"Buildings",
"Virtual Environment",
"Avatar",
"Self Disclosure"
],
"authors": [
{
"affiliation": null,
"fullName": "Rosalie Hooi",
"givenName": "Rosalie",
"surname": "Hooi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hichang Cho",
"givenName": "Hichang",
"surname": "Cho",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2013-01-01T00:00:00",
"pubType": "proceedings",
"pages": "883-892",
"year": "2013",
"issn": "1530-1605",
"isbn": "978-1-4673-5933-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4892a873",
"articleId": "12OmNzEmFEs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4892a893",
"articleId": "12OmNARiLZR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itng/2010/3984/0/3984a529",
"title": "Effects of System Characteristics on Users' Self-Disclosure in Social Networking Sites",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2010/3984a529/12OmNqFrGyI",
"parentPublication": {
"id": "proceedings/itng/2010/3984/0",
"title": "Information Technology: New Generations, Third International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2011/4513/3/4513c066",
"title": "Relationships between Robot's Self-Disclosures and Human's Anxiety toward Robots",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2011/4513c066/12OmNvkpkTX",
"parentPublication": {
"id": "proceedings/wi-iat/2011/4513/3",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2003/1874/6/187460161b",
"title": "A Self-Disclosure Model for Personal Health Information",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2003/187460161b/12OmNvlxJtl",
"parentPublication": {
"id": "proceedings/hicss/2003/1874/6",
"title": "36th Annual Hawaii International Conference on System Sciences, 2003. Proceedings of the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icee/2010/3997/0/3997e913",
"title": "The Quality of Listed Banks' Internal Control Information Disclosure",
"doi": null,
"abstractUrl": "/proceedings-article/icee/2010/3997e913/12OmNwdbVaJ",
"parentPublication": {
"id": "proceedings/icee/2010/3997/0",
"title": "International Conference on E-Business and E-Government",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pci/2009/3788/0/3788a207",
"title": "Avatars' Appearance and Social Behavior in Online Virtual Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/pci/2009/3788a207/12OmNwt5sn9",
"parentPublication": {
"id": "proceedings/pci/2009/3788/0",
"title": "2009 13th Panhellenic Conference on Informatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/passat-socialcom/2011/1931/0/06113132",
"title": "Group Membership and Diffusion in Virtual Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/passat-socialcom/2011/06113132/12OmNy6Zs5M",
"parentPublication": {
"id": "proceedings/passat-socialcom/2011/1931/0",
"title": "2011 IEEE Third Int'l Conference on Privacy, Security, Risk and Trust (PASSAT) / 2011 IEEE Third Int'l Conference on Social Computing (SocialCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2009/3823/4/3823e416",
"title": "New Media Use in Context: Environmental Cues and Online Self-Disclosure via Weblogs",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2009/3823e416/12OmNySosKI",
"parentPublication": {
"id": "proceedings/cse/2009/3823/2",
"title": "2009 International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2012/4525/0/4525d480",
"title": "Linguistic Markers of Secrets and Sensitive Self-Disclosure in Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2012/4525d480/12OmNzYeAMG",
"parentPublication": {
"id": "proceedings/hicss/2012/4525/0",
"title": "2012 45th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icm/2011/4522/4/4522d179",
"title": "Assessment of the Information Disclosure Level about Government Website through AHP-TOPSIS Method",
"doi": null,
"abstractUrl": "/proceedings-article/icm/2011/4522d179/12OmNzahbWc",
"parentPublication": {
"id": "proceedings/icm/2011/4522/4",
"title": "Information Technology, Computer Engineering and Management Sciences, International Conference of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2007/03/e0171",
"title": "Efficiency of Vulnerability Disclosure Mechanisms to Disseminate Vulnerability Knowledge",
"doi": null,
"abstractUrl": "/journal/ts/2007/03/e0171/13rRUwInv66",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzVXNJh",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxxvAKv",
"doi": "10.1109/3DUI.2015.7131728",
"title": "Characterizing embodied interaction in First and Third Person Perspective viewpoints",
"normalizedTitle": "Characterizing embodied interaction in First and Third Person Perspective viewpoints",
"abstract": "Third Person Perspective (3PP) viewpoints have the potential to expand how one perceives and acts in a virtual environment. They offer increased awareness of the posture and of the surrounding of the virtual body as compared to First Person Perspective (1PP). But from another standpoint, 3PP can be considered as less effective for inducing a strong sense of embodiment into a virtual body. Following an experimental paradigm based on full body motion capture and immersive interaction, this study investigates the effect of perspective and of visuomotor synchrony on the sense of embodiment. It provides evidence supporting a high sense of embodiment in both 1PP and 3PP during engaging motor tasks, as well as guidelines for choosing the optimal perspective depending on location of targets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Third Person Perspective (3PP) viewpoints have the potential to expand how one perceives and acts in a virtual environment. They offer increased awareness of the posture and of the surrounding of the virtual body as compared to First Person Perspective (1PP). But from another standpoint, 3PP can be considered as less effective for inducing a strong sense of embodiment into a virtual body. Following an experimental paradigm based on full body motion capture and immersive interaction, this study investigates the effect of perspective and of visuomotor synchrony on the sense of embodiment. It provides evidence supporting a high sense of embodiment in both 1PP and 3PP during engaging motor tasks, as well as guidelines for choosing the optimal perspective depending on location of targets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Third Person Perspective (3PP) viewpoints have the potential to expand how one perceives and acts in a virtual environment. They offer increased awareness of the posture and of the surrounding of the virtual body as compared to First Person Perspective (1PP). But from another standpoint, 3PP can be considered as less effective for inducing a strong sense of embodiment into a virtual body. Following an experimental paradigm based on full body motion capture and immersive interaction, this study investigates the effect of perspective and of visuomotor synchrony on the sense of embodiment. It provides evidence supporting a high sense of embodiment in both 1PP and 3PP during engaging motor tasks, as well as guidelines for choosing the optimal perspective depending on location of targets.",
"fno": "07131728",
"keywords": [
"Visualization",
"Cameras",
"Avatars",
"End Effectors",
"Tracking",
"Rubber",
"Synchronization",
"Third Person Perspective",
"Virtual Reality",
"Embodied Interaction",
"Embodiment"
],
"authors": [
{
"affiliation": "Immersive Interaction Group, École Polytechnique Fédérale de Lausanne, Switzerland",
"fullName": "Henrique G. Debarba",
"givenName": "Henrique G.",
"surname": "Debarba",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Immersive Interaction Group, École Polytechnique Fédérale de Lausanne, Switzerland",
"fullName": "Eray Molla",
"givenName": "Eray",
"surname": "Molla",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Neuroprosthetics, École Polytechnique Fédérale de Lausanne, Switzerland",
"fullName": "Bruno Herbelin",
"givenName": "Bruno",
"surname": "Herbelin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Immersive Interaction Group, École Polytechnique Fédérale de Lausanne, Switzerland",
"fullName": "Ronan Boulic",
"givenName": "Ronan",
"surname": "Boulic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "67-72",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07131727",
"articleId": "12OmNzuZUrc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07131729",
"articleId": "12OmNAZfxIF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504682",
"title": "The role of interaction in virtual embodiment: Effects of the virtual hand representation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504682/12OmNwE9Our",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2010/3869/0/07-07-01",
"title": "Embodied Social Presence Theory",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2010/07-07-01/12OmNx6g6qg",
"parentPublication": {
"id": "proceedings/hicss/2010/3869/0",
"title": "2010 43rd Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2014/07/mco2014070024",
"title": "Transcending the Self in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/magazine/co/2014/07/mco2014070024/13rRUwcAqvw",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049676",
"title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049764",
"title": "Effects of Collaborative Training Using Virtual Co-embodiment on Motor Skill Learning",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049764/1KYox5WNvnW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2018/0604/0/060400a075",
"title": "User Experience in Games with HMD Glasses through First and Third Person Viewpoints with Emphasis on Embodiment",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2018/060400a075/1cJ7y8bAdOw",
"parentPublication": {
"id": "proceedings/svr/2018/0604/0",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a511",
"title": "The Impact of Virtual Reality and Viewpoints in Body Motion Based Drone Teleoperation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a511/1tuAKv1IB0s",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNro0Ib9",
"title": "Volume Graphics 2005",
"acronym": "vg",
"groupId": "1002149",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAIdBRP",
"doi": "10.1109/VG.2005.194095",
"title": "An evaluation of using real-time volumetric display of 3D ultrasound data for intracardiac catheter manipulation tasks",
"normalizedTitle": "An evaluation of using real-time volumetric display of 3D ultrasound data for intracardiac catheter manipulation tasks",
"abstract": "The enthusiasm for novel, minimally invasive, catheter based intracardiac procedures has highlighted the need to provide accurate, realtime, anatomically based image guidance to decrease complications, improve precision, and decrease fluoroscopy time. The recent development of realtime 3D echocardiography creates the opportunity to greatly improve our ability to guide minimally invasive procedures (Ahmad, 2003). However, the need to present 3D data on a 2D display decreases the utility of 3D echocardiography because echocardiographers cannot readily appreciate 3D perspective on a 2D display without ongoing image manipulation. We evaluated the use of a novel strategy of presenting the data in a true 3D volumetric display, Perspecta Spatial 3D System (Actuality Systems, Inc., Burlington, MA). Two experienced echocardiographers performed the task of identifying the targeted location of a catheter within 6 different phantoms using four display methods. Echocardiographic images were obtained with a SONOS 7500 (Philips Medical Systems, Inc., Andover, MA). Completion of the task was significantly faster with the Perspecta display with no loss in accuracy. Echocardiography in 3D significantly improves the ability of echocardiography for guidance of catheter based procedures. Further improvement is achieved by using a true 3D volumetric display, which allows for more intuitive assessment of the spatial relationships of catheters in three-dimensional space compared with conventional 2D visualization modalities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The enthusiasm for novel, minimally invasive, catheter based intracardiac procedures has highlighted the need to provide accurate, realtime, anatomically based image guidance to decrease complications, improve precision, and decrease fluoroscopy time. The recent development of realtime 3D echocardiography creates the opportunity to greatly improve our ability to guide minimally invasive procedures (Ahmad, 2003). However, the need to present 3D data on a 2D display decreases the utility of 3D echocardiography because echocardiographers cannot readily appreciate 3D perspective on a 2D display without ongoing image manipulation. We evaluated the use of a novel strategy of presenting the data in a true 3D volumetric display, Perspecta Spatial 3D System (Actuality Systems, Inc., Burlington, MA). Two experienced echocardiographers performed the task of identifying the targeted location of a catheter within 6 different phantoms using four display methods. Echocardiographic images were obtained with a SONOS 7500 (Philips Medical Systems, Inc., Andover, MA). Completion of the task was significantly faster with the Perspecta display with no loss in accuracy. Echocardiography in 3D significantly improves the ability of echocardiography for guidance of catheter based procedures. Further improvement is achieved by using a true 3D volumetric display, which allows for more intuitive assessment of the spatial relationships of catheters in three-dimensional space compared with conventional 2D visualization modalities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The enthusiasm for novel, minimally invasive, catheter based intracardiac procedures has highlighted the need to provide accurate, realtime, anatomically based image guidance to decrease complications, improve precision, and decrease fluoroscopy time. The recent development of realtime 3D echocardiography creates the opportunity to greatly improve our ability to guide minimally invasive procedures (Ahmad, 2003). However, the need to present 3D data on a 2D display decreases the utility of 3D echocardiography because echocardiographers cannot readily appreciate 3D perspective on a 2D display without ongoing image manipulation. We evaluated the use of a novel strategy of presenting the data in a true 3D volumetric display, Perspecta Spatial 3D System (Actuality Systems, Inc., Burlington, MA). Two experienced echocardiographers performed the task of identifying the targeted location of a catheter within 6 different phantoms using four display methods. Echocardiographic images were obtained with a SONOS 7500 (Philips Medical Systems, Inc., Andover, MA). Completion of the task was significantly faster with the Perspecta display with no loss in accuracy. Echocardiography in 3D significantly improves the ability of echocardiography for guidance of catheter based procedures. Further improvement is achieved by using a true 3D volumetric display, which allows for more intuitive assessment of the spatial relationships of catheters in three-dimensional space compared with conventional 2D visualization modalities.",
"fno": "01500522",
"keywords": [
"Three Dimensional Displays",
"Catheters",
"Echocardiography",
"Computer Graphics",
"Medical Image Processing",
"Real Time Volumetric Display",
"3 D Ultrasound Data",
"Intracardiac Catheter Manipulation",
"Image Guidance",
"Precision Improvement",
"Fluoroscopy Time",
"Real Time 3 D Echocardiography",
"Minimally Invasive Procedure",
"3 D Data Presentation",
"2 D Display",
"3 D Perspective",
"Image Manipulation",
"3 D Volumetric Display",
"Perspecta Spatial 3 D System",
"Catheter Location",
"Echocardiographic Image",
"SONOS 7500",
"Perspecta Display",
"Three Dimensional Space",
"2 D Visualization Modality",
"Cardiac Ablation",
"Computer Graphics",
"Intracardiac Catheter Procedure",
"Medical Visualization",
"Volume Graphics",
"Three Dimensional Displays",
"Ultrasonic Imaging",
"Catheters",
"Echocardiography",
"Two Dimensional Displays",
"Minimally Invasive Surgery",
"Imaging Phantoms",
"SONOS Devices",
"Biomedical Imaging",
"Visualization"
],
"authors": [
{
"affiliation": "Stanford Univ., CA, USA",
"fullName": "A.S. Wang",
"givenName": "A.S.",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stanford Univ., CA, USA",
"fullName": "Girish Narayan",
"givenName": null,
"surname": "Girish Narayan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "D. Kao",
"givenName": "D.",
"surname": "Kao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "D. Liang",
"givenName": "D.",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-03-01T00:00:00",
"pubType": "proceedings",
"pages": "41,42,43,44,45",
"year": "2005",
"issn": "1727-8376",
"isbn": "3-905673-26-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01500521",
"articleId": "12OmNvAAthd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01500523",
"articleId": "12OmNyLA5zV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a443",
"title": "An Interactive 3D Preoperative Planning and Training System for Minimally Invasive Vascular Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a443/12OmNAfy7Id",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413528",
"title": "Multispectral analysis of object surfaces extracted from volumetric data sets",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413528/12OmNC3FG58",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2009/3852/0/3852a397",
"title": "Research of a High-Resolution Volumetric 3D Display System",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2009/3852a397/12OmNwtn3v2",
"parentPublication": {
"id": "proceedings/icinis/2009/3852/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999696",
"title": "Biopackaging of Minimally Invasive Ultrasound Assisted Clot Lysis Device for Stroke Treatment",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999696/12OmNxxdZNj",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/05/01510543",
"title": "3D stereo interactive medical visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2005/05/01510543/13rRUyYBlja",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eccs/2022/9689/0/968900a028",
"title": "Effective Data Augmentation Methods with U-Net Model for Catheter Segmentation on Echocardiography Image",
"doi": null,
"abstractUrl": "/proceedings-article/eccs/2022/968900a028/1JvaPcoleYE",
"parentPublication": {
"id": "proceedings/eccs/2022/9689/0",
"title": "2022 European Conference on Communication Systems (ECCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049674",
"title": "Evaluation of AR visualization approaches for catheter insertion into the ventricle cavity",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049674/1KYoqGfVtBK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090508",
"title": "Interactive Navigation System in Mixed-Reality for Neurosurgery",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090508/1jIxvVHkrqo",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvidl/2020/9481/0/948100a197",
"title": "A segmentation processing method for effectively solving the problem of partial covering of angiography images",
"doi": null,
"abstractUrl": "/proceedings-article/cvidl/2020/948100a197/1pbe6OUNf2g",
"parentPublication": {
"id": "proceedings/cvidl/2020/9481/0",
"title": "2020 International Conference on Computer Vision, Image and Deep Learning (CVIDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCdk2Jm",
"doi": "10.1109/VR.2015.7223388",
"title": "Preliminary evaluation of a virtual needle insertion training system",
"normalizedTitle": "Preliminary evaluation of a virtual needle insertion training system",
"abstract": "Inserting a needle to perform a biopsy requires a high haptic sensitivity. The traditional learning methods based on observation and training on real patients are questionable. In this paper, we present a preliminary evaluation of a VR trainer for needle insertion tasks. The system aims to replicate an existing physical setup while overcoming some of its limitations. Results permit to validate some design choices and suggest some UI improvements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Inserting a needle to perform a biopsy requires a high haptic sensitivity. The traditional learning methods based on observation and training on real patients are questionable. In this paper, we present a preliminary evaluation of a VR trainer for needle insertion tasks. The system aims to replicate an existing physical setup while overcoming some of its limitations. Results permit to validate some design choices and suggest some UI improvements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Inserting a needle to perform a biopsy requires a high haptic sensitivity. The traditional learning methods based on observation and training on real patients are questionable. In this paper, we present a preliminary evaluation of a VR trainer for needle insertion tasks. The system aims to replicate an existing physical setup while overcoming some of its limitations. Results permit to validate some design choices and suggest some UI improvements.",
"fno": "07223388",
"keywords": [
"Needles",
"Haptic Interfaces",
"Training",
"Accuracy",
"Biopsy",
"Three Dimensional Displays",
"Biological Tissues",
"User Evaluation",
"Haptic Perception",
"Surgical Training"
],
"authors": [
{
"affiliation": "IBISC Laboratory, University of Evry, France",
"fullName": "Duc Van Nguyen",
"givenName": "Duc",
"surname": "Van Nguyen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IBISC Laboratory, University of Evry, France",
"fullName": "Safa Ben Lakhal",
"givenName": "Safa",
"surname": "Ben Lakhal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IBISC Laboratory, University of Evry, France",
"fullName": "Amine Chellali",
"givenName": "Amine",
"surname": "Chellali",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "247-248",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223387",
"articleId": "12OmNrkT7Ll",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223389",
"articleId": "12OmNwqft3l",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892259",
"title": "Study of interaction fidelity for two viewpoint changing techniques in a virtual biopsy trainer",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892259/12OmNAZOJZ9",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2015/7983/0/07367697",
"title": "A mathematical model of a novel automated medical device for needle insertions",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2015/07367697/12OmNBkP3zY",
"parentPublication": {
"id": "proceedings/bibe/2015/7983/0",
"title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/achi/2010/3957/0/3957a148",
"title": "The Effectiveness of Commercial Haptic Devices for Use in Virtual Needle Insertion Training Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/achi/2010/3957a148/12OmNrJRPe0",
"parentPublication": {
"id": "proceedings/achi/2010/3957/0",
"title": "International Conference on Advances in Computer-Human Interaction",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a007",
"title": "Auditory and Visio-Temporal Distance Coding for 3-Dimensional Perception in Medical Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a007/12OmNrYlmCL",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479920",
"title": "Assessment of Vibrotactile Feedback in a Needle-Insertion Task using a Surgical Robot",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479920/12OmNyOq4T4",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/03/tth2011030188",
"title": "Haptic Simulator for Prostate Brachytherapy with Simulated Needle and Probe Interaction",
"doi": null,
"abstractUrl": "/journal/th/2011/03/tth2011030188/13rRUILtJr3",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2011/02/mcg2011020036",
"title": "A Virtual Reality Simulator for Ultrasound-Guided Biopsy Training",
"doi": null,
"abstractUrl": "/magazine/cg/2011/02/mcg2011020036/13rRUwjoNCd",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/04/06909076",
"title": "Teleoperation of Steerable Flexible Needles by Combining Kinesthetic and Vibratory Feedback",
"doi": null,
"abstractUrl": "/journal/th/2014/04/06909076/13rRUxASuhN",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/03/tth2011030199",
"title": "Integrating Haptics with Augmented Reality in a Femoral Palpation and Needle Insertion Training Simulation",
"doi": null,
"abstractUrl": "/journal/th/2011/03/tth2011030199/13rRUxd2aZb",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/03/tth2011030155",
"title": "Perception and Action in Teleoperated Needle Insertion",
"doi": null,
"abstractUrl": "/journal/th/2011/03/tth2011030155/13rRUyoPSPf",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBrDqDF",
"title": "BioMedical Engineering and Informatics, International Conference on",
"acronym": "bmei",
"groupId": "1001754",
"volume": "2",
"displayVolume": "2",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvF83nw",
"doi": "10.1109/BMEI.2008.66",
"title": "MRI-Based Patient-Specific Computational Modeling of Right Ventricular Response to Pulmonary Valve Insertion Surgery: A Passive Anisotropic FSI Model with Fiber Orientation",
"normalizedTitle": "MRI-Based Patient-Specific Computational Modeling of Right Ventricular Response to Pulmonary Valve Insertion Surgery: A Passive Anisotropic FSI Model with Fiber Orientation",
"abstract": "Right ventricular (RV) dysfunction is a common cause of heart failure inpatients with congenital heart defects and often leads to impaired functional capacity and premature death. The current surgical approach, which includes pulmonary valve replacement/insertion (PVR), has yielded mixed results. MRI-based patient- specific RV/LV/Patch combination models which included fluid-structure interactions (FSI), anisotropic material properties and two-layer construction with fiber orientations were introduced to test the hypothesis that a PVR surgical design with a smaller patch and more aggressive scar tissue trimming would lead to improved RV cardiac function recovery. Results from our models validated by pre-operation data indicated that the small patch design had 10% improvement in RV function as measured by RV ejection fraction, compared to the conventional patch. Maximum Stress-P<sub>1</sub> value from the anisotropic model was 149.6% higher than that from the isotropic model. Computational RV volume predictions agreed well with CMR-measured volume data (error < 3%).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Right ventricular (RV) dysfunction is a common cause of heart failure inpatients with congenital heart defects and often leads to impaired functional capacity and premature death. The current surgical approach, which includes pulmonary valve replacement/insertion (PVR), has yielded mixed results. MRI-based patient- specific RV/LV/Patch combination models which included fluid-structure interactions (FSI), anisotropic material properties and two-layer construction with fiber orientations were introduced to test the hypothesis that a PVR surgical design with a smaller patch and more aggressive scar tissue trimming would lead to improved RV cardiac function recovery. Results from our models validated by pre-operation data indicated that the small patch design had 10% improvement in RV function as measured by RV ejection fraction, compared to the conventional patch. Maximum Stress-P<sub>1</sub> value from the anisotropic model was 149.6% higher than that from the isotropic model. Computational RV volume predictions agreed well with CMR-measured volume data (error < 3%).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Right ventricular (RV) dysfunction is a common cause of heart failure inpatients with congenital heart defects and often leads to impaired functional capacity and premature death. The current surgical approach, which includes pulmonary valve replacement/insertion (PVR), has yielded mixed results. MRI-based patient- specific RV/LV/Patch combination models which included fluid-structure interactions (FSI), anisotropic material properties and two-layer construction with fiber orientations were introduced to test the hypothesis that a PVR surgical design with a smaller patch and more aggressive scar tissue trimming would lead to improved RV cardiac function recovery. Results from our models validated by pre-operation data indicated that the small patch design had 10% improvement in RV function as measured by RV ejection fraction, compared to the conventional patch. Maximum Stress-P1 value from the anisotropic model was 149.6% higher than that from the isotropic model. Computational RV volume predictions agreed well with CMR-measured volume data (error < 3%).",
"fno": "3118b160",
"keywords": [
"Biomedical MRI",
"Cardiology",
"Medical Computing",
"Surgery",
"MRI Based Patient Specific Computational Modeling",
"Right Ventricular Dysfunction",
"Pulmonary Valve Insertion Surgery",
"Passive Anisotropic FSI Model",
"Fiber Orientation",
"Heart Failure",
"Congenital Heart Defects",
"Fluid Structure Interaction",
"Anisotropic Material Properties",
"Two Layer Construction",
"Scar Tissue Trimming",
"Maximum Stress P 1 Value",
"Computational Modeling",
"Valves",
"Surgery",
"Anisotropic Magnetoresistance",
"Heart",
"Biological System Modeling",
"Solid Modeling",
"Pediatrics",
"Biomedical Imaging",
"Design Optimization",
"Right Ventricle",
"Congenital Heart Disease",
"Tetralogy Of Fallot",
"Heart Model",
"Fluid Structure Interaction"
],
"authors": [
{
"affiliation": "School of Mathematical Sciences, Beijing Normal University, Beijing, China",
"fullName": "Chun Yang",
"givenName": "Chun",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mathematical Sciences Department, Worcester Polytechnic InstituteWorcester, MA 01609,",
"fullName": "Dalin Tang",
"givenName": "Dalin",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Cardiology, Children's Hospital, BostonDepartment of Pediatric, Harvard Medical School, Boston, MA 02115 USA",
"fullName": "Tal Geva",
"givenName": "Tal",
"surname": "Geva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "4Dept. of Cardiac Surgery, Children’s Hospital, BostonHarvard Medical School, Boston, MA 02115 USA",
"fullName": "Pedro J. del Nido",
"givenName": "Pedro J.",
"surname": "del Nido",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bmei",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-05-01T00:00:00",
"pubType": "proceedings",
"pages": "160-167",
"year": "2008",
"issn": "1948-2914",
"isbn": "978-0-7695-3118-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3118b076",
"articleId": "12OmNCwCLs1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3118b081",
"articleId": "12OmNBv2CfL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2015/7568/0/7568a392",
"title": "Simulation and Visualization of Deformation with Anisotropic Materials",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a392/12OmNAOKnXL",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2014/4284/0/4284a111",
"title": "Adaptive Anisotropic Diffusion for Image Denoising Based on Structure Tensor",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a111/12OmNAS9zBg",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2008/3554/0/04775703",
"title": "Anisotropic Diffusion for Preservation of Line-edges",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2008/04775703/12OmNBA9oBV",
"parentPublication": {
"id": "proceedings/isspit/2008/3554/0",
"title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294790",
"title": "Oriented half Gaussian kernels and anisotropic diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294790/12OmNyrIatl",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2010/7259/0/05521462",
"title": "Reversely Anisotropic Quad-dominant Remeshing",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2010/05521462/12OmNywxlVm",
"parentPublication": {
"id": "proceedings/smi/2010/7259/0",
"title": "Shape Modeling International (SMI 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284694",
"title": "Anisotropic Manifold Ranking for Video Annotation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284694/12OmNzhELfS",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07194844",
"title": "Anisotropic Ambient Volume Shading",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194844/13rRUB7a1fT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111782",
"title": "Anisotropic Sampling of Planar and Two-Manifold Domains for Texture Generation and Glyph Distribution",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111782/13rRUNvgziE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/10/07130660",
"title": "Stable Anisotropic Materials",
"doi": null,
"abstractUrl": "/journal/tg/2015/10/07130660/13rRUy3gn7z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2019/3918/0/391800a607",
"title": "Anisotropic Laplace-Beltrami Operators for Non-Rigid 3D Shape Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2019/391800a607/1gRxl5FIji8",
"parentPublication": {
"id": "proceedings/itme/2019/3918/0",
"title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNApcuag",
"title": "IEEE Haptics Symposium 2008",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyOq4T4",
"doi": "10.1109/HAPTICS.2008.4479920",
"title": "Assessment of Vibrotactile Feedback in a Needle-Insertion Task using a Surgical Robot",
"normalizedTitle": "Assessment of Vibrotactile Feedback in a Needle-Insertion Task using a Surgical Robot",
"abstract": "The present study examined the effect of vibrotactile feedback in a needle-insertion task using a surgical robot. Four participants performed the task by hand (using a manual needle driver instrument) and by using a surgical robot, with or without vibrotactile feedback. The vibrotactile feedback signal indicated the deviation in force direction, with the signal amplitude modulated by the force magnitude. Visual feedback was always available in all experimental conditions. The participants' task was to insert a hooked needle into a simulated tissue pad at a pre- marked entrance point and drive it out of the tissue pad at a corresponding pre-marked exit point. The participants were instructed to hold the hooked needle in an orientation that minimized side-loading on the simulated tissue pad and prevented needle rotation in the needle driver. The forces exerted by the needle on the simulated tissue pad were recorded. The results indicated that the vibrotactile display was useful in reducing the overall force-direction deviation during the needle-insertion task, but it increased task completion time. It generally took twice as long to perform the task with the robot than with the hand. One participant who was experienced with the surgical robot consistently applied less force with the robot than with the hand. The vibrotactile feedback reduced the magnitude of the force component that was perpendicular to the suturing surface, but not the forces along the suturing surface. We compare our results to those reported in the literature and discuss the challenges we faced in assessing haptic feedback in a skilled surgical task such as the one used in the present study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The present study examined the effect of vibrotactile feedback in a needle-insertion task using a surgical robot. Four participants performed the task by hand (using a manual needle driver instrument) and by using a surgical robot, with or without vibrotactile feedback. The vibrotactile feedback signal indicated the deviation in force direction, with the signal amplitude modulated by the force magnitude. Visual feedback was always available in all experimental conditions. The participants' task was to insert a hooked needle into a simulated tissue pad at a pre- marked entrance point and drive it out of the tissue pad at a corresponding pre-marked exit point. The participants were instructed to hold the hooked needle in an orientation that minimized side-loading on the simulated tissue pad and prevented needle rotation in the needle driver. The forces exerted by the needle on the simulated tissue pad were recorded. The results indicated that the vibrotactile display was useful in reducing the overall force-direction deviation during the needle-insertion task, but it increased task completion time. It generally took twice as long to perform the task with the robot than with the hand. One participant who was experienced with the surgical robot consistently applied less force with the robot than with the hand. The vibrotactile feedback reduced the magnitude of the force component that was perpendicular to the suturing surface, but not the forces along the suturing surface. We compare our results to those reported in the literature and discuss the challenges we faced in assessing haptic feedback in a skilled surgical task such as the one used in the present study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The present study examined the effect of vibrotactile feedback in a needle-insertion task using a surgical robot. Four participants performed the task by hand (using a manual needle driver instrument) and by using a surgical robot, with or without vibrotactile feedback. The vibrotactile feedback signal indicated the deviation in force direction, with the signal amplitude modulated by the force magnitude. Visual feedback was always available in all experimental conditions. The participants' task was to insert a hooked needle into a simulated tissue pad at a pre- marked entrance point and drive it out of the tissue pad at a corresponding pre-marked exit point. The participants were instructed to hold the hooked needle in an orientation that minimized side-loading on the simulated tissue pad and prevented needle rotation in the needle driver. The forces exerted by the needle on the simulated tissue pad were recorded. The results indicated that the vibrotactile display was useful in reducing the overall force-direction deviation during the needle-insertion task, but it increased task completion time. It generally took twice as long to perform the task with the robot than with the hand. One participant who was experienced with the surgical robot consistently applied less force with the robot than with the hand. The vibrotactile feedback reduced the magnitude of the force component that was perpendicular to the suturing surface, but not the forces along the suturing surface. We compare our results to those reported in the literature and discuss the challenges we faced in assessing haptic feedback in a skilled surgical task such as the one used in the present study.",
"fno": "04479920",
"keywords": [
"Feedback",
"Medical Robotics",
"Surgery",
"Vibrotactile Feedback Assessment",
"Needle Insertion Task",
"Surgical Robot",
"Force Direction",
"Signal Amplitude",
"Force Magnitude",
"Visual Feedback",
"Pre Marked Entrance Point",
"Vibrotactile Display",
"Force Direction Deviation",
"Suturing Surface",
"Medical Robotics",
"Needles",
"Surgery",
"Force Feedback",
"Haptic Interfaces",
"Laboratories",
"Robot Sensing Systems",
"Surgical Instruments",
"Visualization",
"Hospitals",
"Surgical Robot",
"Surgical Simulation",
"Vibrotactile Feedback",
"Evaluation",
"Needle Insertion Task",
"H 5 2 Information Interfaces And Presentation User Interfaces Haptic I O",
"H 1 2 Models And Principles User Machine Systems Human Factors"
],
"authors": [
{
"affiliation": "Haptic Interface Research Laboratory, Purdue University, West Lafayette, IN, USA, Email: peddamat@purdue.edu",
"fullName": "Sumanth Peddamatham",
"givenName": "Sumanth",
"surname": "Peddamatham",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Medical Robotics and Visualization Laboratory, Purdue University, West Lafayette, IN, USA, Email: peine@purdue.edu",
"fullName": "William Peine",
"givenName": "William",
"surname": "Peine",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Haptic Interface Research Laboratory, Purdue University, West Lafayette, IN, USA, Email: hongtan@purdue.edu",
"fullName": "Hong Z. Tan",
"givenName": "Hong Z.",
"surname": "Tan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-03-01T00:00:00",
"pubType": "proceedings",
"pages": "93-99",
"year": "2008",
"issn": "2324-7347",
"isbn": "978-1-4244-2005-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04479901",
"articleId": "12OmNyQ7FN1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04479906",
"articleId": "12OmNAKuoT0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223388",
"title": "Preliminary evaluation of a virtual needle insertion training system",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223388/12OmNCdk2Jm",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479999",
"title": "RoSS: Virtual Reality Robotic Surgical Simulator for the da Vinci Surgical System",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479999/12OmNvAiSC9",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/03/07581117",
"title": "A Physics-Based Vibrotactile Feedback Library for Collision Events",
"doi": null,
"abstractUrl": "/journal/th/2017/03/07581117/13rRUwIF69q",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/04/06909076",
"title": "Teleoperation of Steerable Flexible Needles by Combining Kinesthetic and Vibratory Feedback",
"doi": null,
"abstractUrl": "/journal/th/2014/04/06909076/13rRUxASuhN",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2013/01/tth2013010013",
"title": "Comparison of Visual and Vibrotactile Feedback Methods for Seated Posture Guidance",
"doi": null,
"abstractUrl": "/journal/th/2013/01/tth2013010013/13rRUxcKzVp",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/03/tth2011030199",
"title": "Integrating Haptics with Augmented Reality in a Femoral Palpation and Needle Insertion Training Simulation",
"doi": null,
"abstractUrl": "/journal/th/2011/03/tth2011030199/13rRUxd2aZb",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1996/01/mcg1996010046",
"title": "Assessing Craniofacial Surgical Simulation",
"doi": null,
"abstractUrl": "/magazine/cg/1996/01/mcg1996010046/13rRUy0ZzUT",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/03/tth2011030155",
"title": "Perception and Action in Teleoperated Needle Insertion",
"doi": null,
"abstractUrl": "/journal/th/2011/03/tth2011030155/13rRUyoPSPf",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/03/08948290",
"title": "Surgical Navigation System for Low-Dose-Rate Brachytherapy Based on Mixed Reality",
"doi": null,
"abstractUrl": "/magazine/cg/2021/03/08948290/1geNLto4KGs",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2021/4261/0/09635512",
"title": "Magnetic Model Calibration for Tetherless Surgical Needle Manipulation using Zernike Polynomial Fitting",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2021/09635512/1zmvmdco7ao",
"parentPublication": {
"id": "proceedings/bibe/2021/4261/0",
"title": "2021 IEEE 21st International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvlxJwR",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cbms",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzQR1qX",
"doi": "10.1109/CBMS.2017.12",
"title": "Simulation for Training Cochlear Implant Electrode Insertion",
"normalizedTitle": "Simulation for Training Cochlear Implant Electrode Insertion",
"abstract": "Cochlear implant surgery is performed to restore hearing in patients with a range of hearing disorders. To optimise hearing outcomes, trauma during the insertion of a cochlear implant electrode has to be minimised. Factors that contribute to the degree of trauma caused during surgery include: the location of the electrode, type of electrode, and the competence level of the surgeon. Surgical competence depends on knowledge of anatomy and experience in a range of situations, along with technical skills. Thus, during training, a surgeon should be exposed to a range of anatomical variations, where he/she can learn and practice the intricacies of the surgical procedure, as well as explore different implant options and consequences thereof. Virtual reality simulation offers a versatile platform on which such training can be conducted. In this paper, we discuss a prototype implementation for the visualisation and analysis of electrode trajectories in relation to anatomical variation, prior to its inclusion in a virtual reality training module for cochlear implant surgery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cochlear implant surgery is performed to restore hearing in patients with a range of hearing disorders. To optimise hearing outcomes, trauma during the insertion of a cochlear implant electrode has to be minimised. Factors that contribute to the degree of trauma caused during surgery include: the location of the electrode, type of electrode, and the competence level of the surgeon. Surgical competence depends on knowledge of anatomy and experience in a range of situations, along with technical skills. Thus, during training, a surgeon should be exposed to a range of anatomical variations, where he/she can learn and practice the intricacies of the surgical procedure, as well as explore different implant options and consequences thereof. Virtual reality simulation offers a versatile platform on which such training can be conducted. In this paper, we discuss a prototype implementation for the visualisation and analysis of electrode trajectories in relation to anatomical variation, prior to its inclusion in a virtual reality training module for cochlear implant surgery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cochlear implant surgery is performed to restore hearing in patients with a range of hearing disorders. To optimise hearing outcomes, trauma during the insertion of a cochlear implant electrode has to be minimised. Factors that contribute to the degree of trauma caused during surgery include: the location of the electrode, type of electrode, and the competence level of the surgeon. Surgical competence depends on knowledge of anatomy and experience in a range of situations, along with technical skills. Thus, during training, a surgeon should be exposed to a range of anatomical variations, where he/she can learn and practice the intricacies of the surgical procedure, as well as explore different implant options and consequences thereof. Virtual reality simulation offers a versatile platform on which such training can be conducted. In this paper, we discuss a prototype implementation for the visualisation and analysis of electrode trajectories in relation to anatomical variation, prior to its inclusion in a virtual reality training module for cochlear implant surgery.",
"fno": "1710a001",
"keywords": [
"Biomedical Electrodes",
"Cochlear Implants",
"Ear",
"Hearing",
"Medical Computing",
"Medical Disorders",
"Surgery",
"Virtual Reality",
"Surgical Competence",
"Virtual Reality Simulation",
"Virtual Reality Training Module",
"Cochlear Implant Surgery",
"Hearing Disorders",
"Cochlear Implant Electrode Insertion",
"Electrode Trajectory Visualization",
"Electrode Trajectory Analysis",
"Surgery",
"Electrodes",
"Bones",
"Cochlear Implants",
"Training",
"Solid Modeling",
"Trajectory",
"Cochlear Implant Trajectory",
"Cochlear Implant Electrode Insertion Training",
"Electrode Trajectory Visualisation",
"Virtual Reality Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Xingjun Ma",
"givenName": "Xingjun",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sudanthi Wijewickrema",
"givenName": "Sudanthi",
"surname": "Wijewickrema",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yun Zhou",
"givenName": "Yun",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bridget Copson",
"givenName": "Bridget",
"surname": "Copson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "James Bailey",
"givenName": "James",
"surname": "Bailey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gregor Kennedy",
"givenName": "Gregor",
"surname": "Kennedy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stephen O'Leary",
"givenName": "Stephen",
"surname": "O'Leary",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cbms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2017",
"issn": "2372-9198",
"isbn": "978-1-5386-1710-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1710z037",
"articleId": "12OmNAJ4peY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1710a007",
"articleId": "12OmNyen1q8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cmc/2011/312/0/05931172",
"title": "Study on Mobile Phone-Based Speech Processor for Cochlear Implant",
"doi": null,
"abstractUrl": "/proceedings-article/cmc/2011/05931172/12OmNvrdI6p",
"parentPublication": {
"id": "proceedings/cmc/2011/312/0",
"title": "2011 Third International Conference on Communications and Mobile Computing (CMC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1993/3752/0/00263012",
"title": "Cochlear implants for the profoundly deaf",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1993/00263012/12OmNwE9Owt",
"parentPublication": {
"id": "proceedings/cbms/1993/3752/0",
"title": "Proceedings of the Sixth Annual 1993 IEEE Symposium Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmete/2016/3411/0/07938982",
"title": "Comparison of Speech Intelligibility Parameter in Cochlear Implants by Spatial Filtering and Coherence Function Methods",
"doi": null,
"abstractUrl": "/proceedings-article/icmete/2016/07938982/12OmNxR5UOq",
"parentPublication": {
"id": "proceedings/icmete/2016/3411/0",
"title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2017/1710/0/1710a007",
"title": "Design and Evaluation of a Virtual Reality Simulation Module for Training Advanced Temporal Bone Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a007/12OmNyen1q8",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/4/01326751",
"title": "Vowel and consonant confusion in noise by cochlear implant subjects: predicting performance using signal processing techniques",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326751/12OmNylsZWp",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/4",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/4/01326748",
"title": "Importance of pitch and periodicity to Chinese-speaking cochlear implant patients",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326748/12OmNz61cWQ",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/4",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/7/3507g351",
"title": "Modeling Performances of Virtual Channels for Cochlear Implant Systems",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507g351/12OmNzTYBVs",
"parentPublication": {
"id": "proceedings/csie/2009/3507/7",
"title": "Computer Science and Information Engineering, World Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/1/3118a478",
"title": "A New Speech Coding for Improving the Quality of Cochlear Implant",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118a478/12OmNzw8j7D",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/1",
"title": "2008 International Conference on Biomedical Engineering and Informatics (BMEI 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2008/01/mpc2008010040",
"title": "A Cochlear-Implant Processor for Encoding Music and Lowering Stimulation Power",
"doi": null,
"abstractUrl": "/magazine/pc/2008/01/mpc2008010040/13rRUyfbwnW",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceee/2019/3910/0/391000a144",
"title": "Application of Environment Noise Classification towards Sound Recognition for Cochlear Implant Users",
"doi": null,
"abstractUrl": "/proceedings-article/iceee/2019/391000a144/1cpqFK5IsO4",
"parentPublication": {
"id": "proceedings/iceee/2019/3910/0",
"title": "2019 6th International Conference on Electrical and Electronics Engineering (ICEEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKisA",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WwsQ7x",
"doi": "10.1109/AIVR.2018.00057",
"title": "A Virtual Reality Based Simulator for Training Surgical Skills in Procedure of Catheter Ablation",
"normalizedTitle": "A Virtual Reality Based Simulator for Training Surgical Skills in Procedure of Catheter Ablation",
"abstract": "We present a VR-based simulator built for training surgical skills in the procedure of catheter ablation. Based on multi-body dynamics, we proposed a novel method to simulate the interactive behavior of the surgical devices and the human vascular system. An estimation based optimization technique and a track based motion control strategy are proposed to make the simulation efficient enough to achieve high level performance. The beating of human heart is also simulated in real time with our method within the position based dynamics framework. Results demonstrate that our simulator provides a realistic, effective, and stable environment for trainees to acquire essential surgical skills.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a VR-based simulator built for training surgical skills in the procedure of catheter ablation. Based on multi-body dynamics, we proposed a novel method to simulate the interactive behavior of the surgical devices and the human vascular system. An estimation based optimization technique and a track based motion control strategy are proposed to make the simulation efficient enough to achieve high level performance. The beating of human heart is also simulated in real time with our method within the position based dynamics framework. Results demonstrate that our simulator provides a realistic, effective, and stable environment for trainees to acquire essential surgical skills.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a VR-based simulator built for training surgical skills in the procedure of catheter ablation. Based on multi-body dynamics, we proposed a novel method to simulate the interactive behavior of the surgical devices and the human vascular system. An estimation based optimization technique and a track based motion control strategy are proposed to make the simulation efficient enough to achieve high level performance. The beating of human heart is also simulated in real time with our method within the position based dynamics framework. Results demonstrate that our simulator provides a realistic, effective, and stable environment for trainees to acquire essential surgical skills.",
"fno": "926900a247",
"keywords": [
"Biomedical Education",
"Catheters",
"Computer Based Training",
"Medical Computing",
"Motion Control",
"Surgery",
"Virtual Reality",
"Training Surgical Skills",
"Catheter Ablation",
"VR Based Simulator",
"Multibody Dynamics",
"Interactive Behavior",
"Surgical Devices",
"Human Vascular System",
"Estimation Based Optimization Technique",
"Track Based Motion Control Strategy",
"Human Heart",
"Position Based Dynamics Framework",
"Essential Surgical Skills",
"Heart",
"Surgery",
"Catheters",
"Training",
"Solid Modeling",
"Navigation",
"Virtual Reality",
"Surgical Simulation",
"Catheter Ablation",
"Atrial Fibrillation"
],
"authors": [
{
"affiliation": null,
"fullName": "Haoyu Wang",
"givenName": "Haoyu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sheng Jiang",
"givenName": "Sheng",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jianhuang Wu",
"givenName": "Jianhuang",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "247-248",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9269-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "926900a244",
"articleId": "17D45XlyDvR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "926900a249",
"articleId": "17D45X0yjS0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2013/2246/0/2246a052",
"title": "Incorporating Haptic and Olfactory into Surgical Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a052/12OmNrJAefy",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479999",
"title": "RoSS: Virtual Reality Robotic Surgical Simulator for the da Vinci Surgical System",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479999/12OmNvAiSC9",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460069",
"title": "A part-task haptic simulator for ophthalmic surgical training",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460069/12OmNwAKCQd",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2014/4435/0/4435a551",
"title": "Haptic System for Force-Profile Acquisition and Display for a Realistic Surgical Simulator",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2014/4435a551/12OmNzYNNae",
"parentPublication": {
"id": "proceedings/cbms/2014/4435/0",
"title": "2014 IEEE 27th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/01/mcg2013010044",
"title": "A VR Simulator for Intracardiac Intervention",
"doi": null,
"abstractUrl": "/magazine/cg/2013/01/mcg2013010044/13rRUILLkxY",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a932",
"title": "[DC] XR for Improving Cardiac Catheter Ablation Procedure",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a932/1CJes1FnQLm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049674",
"title": "Evaluation of AR visualization approaches for catheter insertion into the ventricle cavity",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049674/1KYoqGfVtBK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090508",
"title": "Interactive Navigation System in Mixed-Reality for Neurosurgery",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090508/1jIxvVHkrqo",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/annsim/2021/375/0/09552048",
"title": "Towards Applications of the “Surgical GPS” on Spinal Procedures",
"doi": null,
"abstractUrl": "/proceedings-article/annsim/2021/09552048/1xsdGFTFU4w",
"parentPublication": {
"id": "proceedings/annsim/2021/375/0",
"title": "2021 Annual Modeling and Simulation Conference (ANNSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j517",
"title": "Towards Unified Surgical Skill Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j517/1yeJPNahaj6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyaXPPB",
"title": "Information Visualization, IEEE Symposium on",
"acronym": "ieee-infovis",
"groupId": "1000371",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzDeh89",
"doi": "10.1109/INFVIS.2001.963274",
"title": "Change Blindness in Information Visualization: A Case Study",
"normalizedTitle": "Change Blindness in Information Visualization: A Case Study",
"abstract": "Change blindness occurs when people do not notice changes in visible elements of a scene. If people use an information visualization system to compare document collection subsets partitioned by their time-stamps, change blindness makes it impossible for them to recognize even very major changes, let alone minor ones. We describe theories from cognitive science that account for the change blindness phenomenon, as well as solutions developed for two visual analysis tools, a dot plot (SPIRE Galaxies) and landscape (ThemeView(tm)) visualizations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Change blindness occurs when people do not notice changes in visible elements of a scene. If people use an information visualization system to compare document collection subsets partitioned by their time-stamps, change blindness makes it impossible for them to recognize even very major changes, let alone minor ones. We describe theories from cognitive science that account for the change blindness phenomenon, as well as solutions developed for two visual analysis tools, a dot plot (SPIRE Galaxies) and landscape (ThemeView(tm)) visualizations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Change blindness occurs when people do not notice changes in visible elements of a scene. If people use an information visualization system to compare document collection subsets partitioned by their time-stamps, change blindness makes it impossible for them to recognize even very major changes, let alone minor ones. We describe theories from cognitive science that account for the change blindness phenomenon, as well as solutions developed for two visual analysis tools, a dot plot (SPIRE Galaxies) and landscape (ThemeView(tm)) visualizations.",
"fno": "13420015",
"keywords": [],
"authors": [
{
"affiliation": "Pacific Northwest National Laboratory",
"fullName": "Lucy Nowell",
"givenName": "Lucy",
"surname": "Nowell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pacific Northwest National Laboratory",
"fullName": "Elizabeth Hetzler",
"givenName": "Elizabeth",
"surname": "Hetzler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pacific Northwest National Laboratory",
"fullName": "Ted Tanasse",
"givenName": "Ted",
"surname": "Tanasse",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-infovis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-10-01T00:00:00",
"pubType": "proceedings",
"pages": "15",
"year": "2001",
"issn": "1522-404X",
"isbn": "0-7695-1342-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "13420007",
"articleId": "12OmNzBOhOv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "13420023",
"articleId": "12OmNC4wtEL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzmclVJ",
"title": "Computing, International Conference on",
"acronym": "cic",
"groupId": "1001748",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxWcH6b",
"doi": "10.1109/CIC.2006.33",
"title": "Control System of Artificial Chemico-Mechanical Muscle. Medical Robotics Application",
"normalizedTitle": "Control System of Artificial Chemico-Mechanical Muscle. Medical Robotics Application",
"abstract": "Currently McKibben's pneumatic muscle shows a great functional analogy to the skeletal muscle. It readily enables to motorize the arms of robots in a closer manner to the human arm than traditional robots. However, the dependence of McKibben's pneumatic muscle with respect to pneumatic source under pressure limits its application to the motorization of arms to fixed station. What is missing with the McKibben's pneumatic muscle is an efficient energy autonomy of the type required in the current project of chemico-mechanical artificial muscle. This present work will focus on an alternative way in which biochemical energy is transformed into osmotic energy and then into mechanical energy by means of a chemico-mechanical muscle and application to medical robotics",
"abstracts": [
{
"abstractType": "Regular",
"content": "Currently McKibben's pneumatic muscle shows a great functional analogy to the skeletal muscle. It readily enables to motorize the arms of robots in a closer manner to the human arm than traditional robots. However, the dependence of McKibben's pneumatic muscle with respect to pneumatic source under pressure limits its application to the motorization of arms to fixed station. What is missing with the McKibben's pneumatic muscle is an efficient energy autonomy of the type required in the current project of chemico-mechanical artificial muscle. This present work will focus on an alternative way in which biochemical energy is transformed into osmotic energy and then into mechanical energy by means of a chemico-mechanical muscle and application to medical robotics",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Currently McKibben's pneumatic muscle shows a great functional analogy to the skeletal muscle. It readily enables to motorize the arms of robots in a closer manner to the human arm than traditional robots. However, the dependence of McKibben's pneumatic muscle with respect to pneumatic source under pressure limits its application to the motorization of arms to fixed station. What is missing with the McKibben's pneumatic muscle is an efficient energy autonomy of the type required in the current project of chemico-mechanical artificial muscle. This present work will focus on an alternative way in which biochemical energy is transformed into osmotic energy and then into mechanical energy by means of a chemico-mechanical muscle and application to medical robotics",
"fno": "27080209",
"keywords": [
"Artificial Limbs",
"Medical Robotics",
"Muscle",
"Control System",
"Artificial Chemico Mechanical Muscle",
"Medical Robotics",
"Mc Kibben Pneumatic Muscle",
"Skeletal Muscle",
"Robot Arms",
"Biochemical Energy",
"Osmotic Energy",
"Mechanical Energy",
"Medical Control Systems",
"Control Systems",
"Muscles",
"Medical Robotics",
"Calcium",
"Mechanical Energy",
"Arm",
"Humans",
"Neuromuscular",
"Sugar"
],
"authors": [
{
"affiliation": "LESIA-INSA, Toulouse",
"fullName": "Diaz Zagal Sergio",
"givenName": "Diaz Zagal",
"surname": "Sergio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LESIA-INSA, Toulouse",
"fullName": "Tondu Bertrand",
"givenName": "Tondu",
"surname": "Bertrand",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cic",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-11-01T00:00:00",
"pubType": "proceedings",
"pages": "209-214",
"year": "2006",
"issn": null,
"isbn": "0-7695-2708-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "27080126",
"articleId": "12OmNwDACDB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27080135",
"articleId": "12OmNzzP5JP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2008/3357/1/3357a721",
"title": "Study on Modeling of Mckibben Pneumatic Artificial Muscle",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357a721/12OmNAWpywq",
"parentPublication": {
"id": "proceedings/icicta/2008/3357/1",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/artcom/2010/4201/0/4201a186",
"title": "Bio-mimetic Behaviour of IPMC Artificial Muscle Using EMG Signal",
"doi": null,
"abstractUrl": "/proceedings-article/artcom/2010/4201a186/12OmNBLdKQm",
"parentPublication": {
"id": "proceedings/artcom/2010/4201/0",
"title": "Advances in Recent Technologies in Communication and Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cpsna/2016/4403/0/4403a049",
"title": "Application of Particle Swarm Optimization to Parameter Estimation of a McKibben Pneumatic Artificial Muscle Model",
"doi": null,
"abstractUrl": "/proceedings-article/cpsna/2016/4403a049/12OmNrkjVbB",
"parentPublication": {
"id": "proceedings/cpsna/2016/4403/0",
"title": "2016 IEEE 4th International Conference on Cyber-Physical Systems, Networks, and Applications (CPSNA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1995/7117/0/71170004",
"title": "Mechanical Introscopy -- A New Modality of Medical Imaging for Detection of Breast and Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1995/71170004/12OmNvFYQJZ",
"parentPublication": {
"id": "proceedings/cbms/1995/7117/0",
"title": "Proceedings Eighth IEEE Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cpsna/2016/4403/0/4403a044",
"title": "Simultaneous Estimation of Contraction Ratio and Parameter of McKibben Pneumatic Artificial Muscle Model Using Log-Normalized Unscented Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/cpsna/2016/4403a044/12OmNvSKNXz",
"parentPublication": {
"id": "proceedings/cpsna/2016/4403/0",
"title": "2016 IEEE 4th International Conference on Cyber-Physical Systems, Networks, and Applications (CPSNA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220317",
"title": "Statistical analysis of muscle-actuated manipulators",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220317/12OmNwt5slP",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/artcom/2009/3845/0/3845a461",
"title": "Quantum Mechanical Modeling and Molecular Dynamic Simulation of Ruthenium (Ru) Polypyridyl Complexes to Study Feasibility of Artificial Photosynthesis",
"doi": null,
"abstractUrl": "/proceedings-article/artcom/2009/3845a461/12OmNzZmZmB",
"parentPublication": {
"id": "proceedings/artcom/2009/3845/0",
"title": "Advances in Recent Technologies in Communication and Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2017/10/mco2017100028",
"title": "Interactive Systems Based on Electrical Muscle Stimulation",
"doi": null,
"abstractUrl": "/magazine/co/2017/10/mco2017100028/13rRUILc8aJ",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2017/03/mpc2017030012",
"title": "Immense Power in a Tiny Package: Wearables Based on Electrical Muscle Stimulation",
"doi": null,
"abstractUrl": "/magazine/pc/2017/03/mpc2017030012/13rRUxAAT4D",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compauto/2022/8194/0/819400a048",
"title": "Pneumatic Artificial Muscle Antagonistic Joint Trajectory Tracking Using Adaptive Explicit Model Predictive Control",
"doi": null,
"abstractUrl": "/proceedings-article/compauto/2022/819400a048/1KxUewZNOuI",
"parentPublication": {
"id": "proceedings/compauto/2022/8194/0",
"title": "2022 2nd International Conference on Computers and Automation (CompAuto)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx19jVT",
"title": "3rd Annual IEEE Conference on Automation Science and Engineering",
"acronym": "case",
"groupId": "1001095",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyO8tNg",
"doi": "10.1109/COASE.2007.4341812",
"title": "The DOHELIX-Muscle: A Novel Technical Muscle for Bionic Robots and Actuating Drive Applications",
"normalizedTitle": "The DOHELIX-Muscle: A Novel Technical Muscle for Bionic Robots and Actuating Drive Applications",
"abstract": "In this paper a new concept of a technical muscle is presented. The proposed muscle is based on standard components and is scalable in many respects: size, weight, power, speed, price, and quality. It is composed of a turning shaft with small diameter and a high-strength and highly flexible plaited cord. The shaft may be driven by a small DC-motor, possibly in combination with a gearbox. This makes design, power supply, and control quite easy compared to other types of artificial muscles such as pneumatic, hydraulic, shape memory alloy, and electro-active polymer muscles. With appropriate measures in design it will be able to meet various application requirements such as wide temperature range and high IP rating. Moreover, with careful selection and dimensioning of components a very high degree of efficiency, a very high power to weight ratio, and high dependability may be achieved. The muscle is applicable as an actuating drive in industrial environments as well as for bionic robot mechanisms with biomimetic and undulatory motion. The name DOHELIX is an acronym for 'double helix', a shape resulting from contraction of the muscle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper a new concept of a technical muscle is presented. The proposed muscle is based on standard components and is scalable in many respects: size, weight, power, speed, price, and quality. It is composed of a turning shaft with small diameter and a high-strength and highly flexible plaited cord. The shaft may be driven by a small DC-motor, possibly in combination with a gearbox. This makes design, power supply, and control quite easy compared to other types of artificial muscles such as pneumatic, hydraulic, shape memory alloy, and electro-active polymer muscles. With appropriate measures in design it will be able to meet various application requirements such as wide temperature range and high IP rating. Moreover, with careful selection and dimensioning of components a very high degree of efficiency, a very high power to weight ratio, and high dependability may be achieved. The muscle is applicable as an actuating drive in industrial environments as well as for bionic robot mechanisms with biomimetic and undulatory motion. The name DOHELIX is an acronym for 'double helix', a shape resulting from contraction of the muscle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper a new concept of a technical muscle is presented. The proposed muscle is based on standard components and is scalable in many respects: size, weight, power, speed, price, and quality. It is composed of a turning shaft with small diameter and a high-strength and highly flexible plaited cord. The shaft may be driven by a small DC-motor, possibly in combination with a gearbox. This makes design, power supply, and control quite easy compared to other types of artificial muscles such as pneumatic, hydraulic, shape memory alloy, and electro-active polymer muscles. With appropriate measures in design it will be able to meet various application requirements such as wide temperature range and high IP rating. Moreover, with careful selection and dimensioning of components a very high degree of efficiency, a very high power to weight ratio, and high dependability may be achieved. The muscle is applicable as an actuating drive in industrial environments as well as for bionic robot mechanisms with biomimetic and undulatory motion. The name DOHELIX is an acronym for 'double helix', a shape resulting from contraction of the muscle.",
"fno": "04341812",
"keywords": [
"Biomimetics",
"Muscle",
"Robots",
"Shafts",
"DOHELIX Muscle",
"Bionic Robots",
"Technical Muscle",
"Turning Shaft",
"Artificial Muscles",
"Actuating Drive",
"Biomimetic Motion",
"Undulatory Motion",
"Muscles",
"Shafts",
"Turning",
"Power Supplies",
"Shape Control",
"Shape Memory Alloys",
"Polymers",
"Temperature Distribution",
"Service Robots",
"Biomimetics"
],
"authors": [
{
"affiliation": "Fraunhofer Institute of Manufacturing Engineering and Automation IPA, Stuttgart, Germany. phone: +497119701432, fax: +497119701008, e-mail: staab@ipa.fraunhofer.de",
"fullName": "Harald Staab",
"givenName": "Harald",
"surname": "Staab",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dresden University of Technology, Germany.",
"fullName": "Arne Sonnenburg",
"givenName": "Arne",
"surname": "Sonnenburg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences Technikum Wien, Austria.",
"fullName": "Christof Hieger",
"givenName": "Christof",
"surname": "Hieger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "case",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-09-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2007",
"issn": "2161-8070",
"isbn": "978-1-4244-1153-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04341811",
"articleId": "12OmNxdm4AO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04341813",
"articleId": "12OmNwcCIIq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/grc/2011/0372/0/06122581",
"title": "A new tongue model based on muscle-control",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122581/12OmNA0MZ1l",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607595",
"title": "Muscle-driven modeling of wrinkles for 3D facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607595/12OmNrkjVeg",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/1/71080378",
"title": "Adaptive position control of antagonistic pneumatic muscle actuators",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71080378/12OmNroijgU",
"parentPublication": {
"id": "proceedings/iros/1995/7108/1",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/car/2009/3519/0/3519a003",
"title": "Modeling and Characteristics Analysis of Intelligent Pneumatic Muscle with Shape Memory Alloy Braided Shell",
"doi": null,
"abstractUrl": "/proceedings-article/car/2009/3519a003/12OmNvrMUjj",
"parentPublication": {
"id": "proceedings/car/2009/3519/0",
"title": "2009 International Asia Conference on Informatics in Control, Automation and Robotics. CAR 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2012/4357/0/06399749",
"title": "Image-based estimation of biomechanical relationship between masticatory muscle activities and mandibular movement",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2012/06399749/12OmNwCJORK",
"parentPublication": {
"id": "proceedings/bibe/2012/4357/0",
"title": "2012 IEEE 12th International Conference on Bioinformatics & Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/03/v0317",
"title": "Creating and Simulating Skeletal Muscle from the Visible Human Data Set",
"doi": null,
"abstractUrl": "/journal/tg/2005/03/v0317/13rRUwgQpDg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hbdss/2021/2188/0/218800a086",
"title": "Telemetry Data Analysis on sEMG Major Muscle Groups in Hanging-up-and-passing-through Horizontal Ladder of 400m Obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/hbdss/2021/218800a086/1AqwRXXF1fi",
"parentPublication": {
"id": "proceedings/hbdss/2021/2188/0",
"title": "2021 International Conference on Health Big Data and Smart Sports (HBDSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2022/8487/0/848700a107",
"title": "Effects of Increased Arm Muscle Tone on Postural Recovery from External Forces: A simulation study",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2022/848700a107/1J6hFO9CDDi",
"parentPublication": {
"id": "proceedings/bibe/2022/8487/0",
"title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2021/6834/0/683400a621",
"title": "Telemetry Data Analysis on sEMG Major Muscle Groups in Three-step Leap over Stumps of 400m Obstacle",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2021/683400a621/1vmLLuO0STS",
"parentPublication": {
"id": "proceedings/icekim/2021/6834/0",
"title": "2021 2nd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiea/2021/3265/0/326500a194",
"title": "Semi-flexible Bionic Whisker Sensor Based on Triboelectric Nanogenerators",
"doi": null,
"abstractUrl": "/proceedings-article/aiea/2021/326500a194/1wzsByaTLi0",
"parentPublication": {
"id": "proceedings/aiea/2021/3265/0",
"title": "2021 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwLOYSr",
"title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzdoMhX",
"doi": "10.1109/ICMEW.2016.7574698",
"title": "A proposal of virtual food texture by electric muscle stimulation",
"normalizedTitle": "A proposal of virtual food texture by electric muscle stimulation",
"abstract": "We propose a novel method to present virtual food texture by using electrical muscle stimulation to the masseter muscle. In this paper, we describe our proposal system; “Electric Food Texture System” consisting of “bite detection part”, “food texture database”, and “electric stimulation part”. We used a photoreflector to measure motion of user's lower jaw for “bite detection”, electromyography sensors to measure food texture for “food texture database”, and a medical electrical stimulator for “electric stimulation”. We conducted some preliminary studies to verify the feasibility of our proposed method. The results suggest that the photoreflector can detect bite, peak amplitude and pulse width of EMG are useful to represent food texture, and users feel hardness and elasticity of virtual food texture by presenting EMS to the masseter muscle. Our contribution is to show the system and investigate the feasibility to present various food texture for improving dining experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel method to present virtual food texture by using electrical muscle stimulation to the masseter muscle. In this paper, we describe our proposal system; “Electric Food Texture System” consisting of “bite detection part”, “food texture database”, and “electric stimulation part”. We used a photoreflector to measure motion of user's lower jaw for “bite detection”, electromyography sensors to measure food texture for “food texture database”, and a medical electrical stimulator for “electric stimulation”. We conducted some preliminary studies to verify the feasibility of our proposed method. The results suggest that the photoreflector can detect bite, peak amplitude and pulse width of EMG are useful to represent food texture, and users feel hardness and elasticity of virtual food texture by presenting EMS to the masseter muscle. Our contribution is to show the system and investigate the feasibility to present various food texture for improving dining experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel method to present virtual food texture by using electrical muscle stimulation to the masseter muscle. In this paper, we describe our proposal system; “Electric Food Texture System” consisting of “bite detection part”, “food texture database”, and “electric stimulation part”. We used a photoreflector to measure motion of user's lower jaw for “bite detection”, electromyography sensors to measure food texture for “food texture database”, and a medical electrical stimulator for “electric stimulation”. We conducted some preliminary studies to verify the feasibility of our proposed method. The results suggest that the photoreflector can detect bite, peak amplitude and pulse width of EMG are useful to represent food texture, and users feel hardness and elasticity of virtual food texture by presenting EMS to the masseter muscle. Our contribution is to show the system and investigate the feasibility to present various food texture for improving dining experience.",
"fno": "07574698",
"keywords": [
"Electromyography",
"Muscles",
"Sensors",
"Haptic Interfaces",
"Databases",
"Medical Services",
"Mouth",
"Virtual Reality",
"Food Texture",
"Electrical Muscle Stimulation"
],
"authors": [
{
"affiliation": "Graduate School of Engineering, The University of Tokyo, Japan",
"fullName": "Arinobu Niijima",
"givenName": "Arinobu",
"surname": "Niijima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Information Technology Center, The University of Tokyo, Japan",
"fullName": "Takefumi Ogawa",
"givenName": "Takefumi",
"surname": "Ogawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-1552-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07574697",
"articleId": "12OmNBNM9aN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07574699",
"articleId": "12OmNwDSdFk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2014/4435/0/4435a421",
"title": "Pervasive Motion Tracking and Muscle Activity Monitor",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2014/4435a421/12OmNscxj9p",
"parentPublication": {
"id": "proceedings/cbms/2014/4435/0",
"title": "2014 IEEE 27th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2012/4668/0/4668a177",
"title": "Development of Hamstrings Muscle Model for Paraplegic with Functional Electrical Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2012/4668a177/12OmNxFaLdS",
"parentPublication": {
"id": "proceedings/isms/2012/4668/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504715",
"title": "Vestibulohaptic passive stimulation for a walking sensation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504715/12OmNxu6p8R",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compeng/2010/3974/0/3974a132",
"title": "Development of Dynamic Muscle Model with Functional Electrical Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/compeng/2010/3974a132/12OmNyFU7bN",
"parentPublication": {
"id": "proceedings/compeng/2010/3974/0",
"title": "Engineering. Complexity in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504744",
"title": "bioSync: Wearable haptic I/O device for synchronous kinesthetic interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504744/12OmNyKJiB6",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2017/10/mco2017100028",
"title": "Interactive Systems Based on Electrical Muscle Stimulation",
"doi": null,
"abstractUrl": "/magazine/co/2017/10/mco2017100028/13rRUILc8aJ",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2017/03/mpc2017030012",
"title": "Immense Power in a Tiny Package: Wearables Based on Electrical Muscle Stimulation",
"doi": null,
"abstractUrl": "/magazine/pc/2017/03/mpc2017030012/13rRUxAAT4D",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscer/2022/8478/0/847800a026",
"title": "Multichannel asynchronous electrical stimulation device relieves muscle fatigue caused by stimulation therapy",
"doi": null,
"abstractUrl": "/proceedings-article/iscer/2022/847800a026/1HbbCwGuMHC",
"parentPublication": {
"id": "proceedings/iscer/2022/8478/0",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2020/9429/0/942900a095",
"title": "Detection of Muscle Fatigue by Fusion of Agonist and Synergistic Muscle sEMG Signals",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2020/942900a095/1mLMgzNcY7e",
"parentPublication": {
"id": "proceedings/cbms/2020/9429/0",
"title": "2020 IEEE 33rd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2021/9101/0/09333969",
"title": "A Precise Muscle Activity Onset/Offset Detection via EMG Signal",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2021/09333969/1qTrPXuv2BW",
"parentPublication": {
"id": "proceedings/icoin/2021/9101/0",
"title": "2021 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1AqwOqlf7Es",
"title": "2021 International Conference on Health Big Data and Smart Sports (HBDSS)",
"acronym": "hbdss",
"groupId": "1845204",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AqwQMWd0Ag",
"doi": "10.1109/HBDSS54392.2021.00020",
"title": "Isokinetic strength training improves motor function in a Chinese classical dancer with knee injury: a case report",
"normalizedTitle": "Isokinetic strength training improves motor function in a Chinese classical dancer with knee injury: a case report",
"abstract": "Background: The muscle strength around the knee is essential for maintaining the stability and function of the knee joint of the dancer. If the musculoskeletal system in dancers is imbalanced and weakness this can cause knee injuries. Isokinetic muscle strength training is a special muscle strength training, but scarce studies have explored the effect of the recovery of knee joint function and stability after Chinese classical dancers’ knee injury.Objective: The purpose of this study was to examine whether 10 weeks of isokinetic muscle strength training would improve the functional health status of knee joint injuries in a Chinese classical dancer.Materials and Methods: A male Chinese classical dancer (age, 22 years) diagnosed with chondropathy of the patella, meniscus injury, patellar tendon injury and patellar cartilage injury in the left knee was recruited. The patient underwent a total of 10 weeks of isokinetic strength training of the flexor and extensor muscles on the left leg. Visual analog scale (VAS) score, the peak torque (PT), the total work (TW), the PT ratio of flexors and extensors (hamstring-to-quadriceps strength ratio, H/Q) at the angle of 60°/s, and the Lysholm score were measured before and after 10-week of training.Results: The VAS score decreased from 6.5 to 2.5. The PT of flexors improved from 53.2 to 102.6, which increased by 93%, and the PT of extensors improved from 76.7 to 172.5, which increased by 125%. The TW of flexors improved from 56.6 to 124.9, and extensors improved from 75.8 to 165. The H/Q improved from 69% to 59%. The Lysholm score increased from 47 to 72.Conclusion: Isokinetic strength training of the flexor and extensor muscles on the leg improves motor function and stability of knee joint in the Chinese classical dancer.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Background: The muscle strength around the knee is essential for maintaining the stability and function of the knee joint of the dancer. If the musculoskeletal system in dancers is imbalanced and weakness this can cause knee injuries. Isokinetic muscle strength training is a special muscle strength training, but scarce studies have explored the effect of the recovery of knee joint function and stability after Chinese classical dancers’ knee injury.Objective: The purpose of this study was to examine whether 10 weeks of isokinetic muscle strength training would improve the functional health status of knee joint injuries in a Chinese classical dancer.Materials and Methods: A male Chinese classical dancer (age, 22 years) diagnosed with chondropathy of the patella, meniscus injury, patellar tendon injury and patellar cartilage injury in the left knee was recruited. The patient underwent a total of 10 weeks of isokinetic strength training of the flexor and extensor muscles on the left leg. Visual analog scale (VAS) score, the peak torque (PT), the total work (TW), the PT ratio of flexors and extensors (hamstring-to-quadriceps strength ratio, H/Q) at the angle of 60°/s, and the Lysholm score were measured before and after 10-week of training.Results: The VAS score decreased from 6.5 to 2.5. The PT of flexors improved from 53.2 to 102.6, which increased by 93%, and the PT of extensors improved from 76.7 to 172.5, which increased by 125%. The TW of flexors improved from 56.6 to 124.9, and extensors improved from 75.8 to 165. The H/Q improved from 69% to 59%. The Lysholm score increased from 47 to 72.Conclusion: Isokinetic strength training of the flexor and extensor muscles on the leg improves motor function and stability of knee joint in the Chinese classical dancer.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Background: The muscle strength around the knee is essential for maintaining the stability and function of the knee joint of the dancer. If the musculoskeletal system in dancers is imbalanced and weakness this can cause knee injuries. Isokinetic muscle strength training is a special muscle strength training, but scarce studies have explored the effect of the recovery of knee joint function and stability after Chinese classical dancers’ knee injury.Objective: The purpose of this study was to examine whether 10 weeks of isokinetic muscle strength training would improve the functional health status of knee joint injuries in a Chinese classical dancer.Materials and Methods: A male Chinese classical dancer (age, 22 years) diagnosed with chondropathy of the patella, meniscus injury, patellar tendon injury and patellar cartilage injury in the left knee was recruited. The patient underwent a total of 10 weeks of isokinetic strength training of the flexor and extensor muscles on the left leg. Visual analog scale (VAS) score, the peak torque (PT), the total work (TW), the PT ratio of flexors and extensors (hamstring-to-quadriceps strength ratio, H/Q) at the angle of 60°/s, and the Lysholm score were measured before and after 10-week of training.Results: The VAS score decreased from 6.5 to 2.5. The PT of flexors improved from 53.2 to 102.6, which increased by 93%, and the PT of extensors improved from 76.7 to 172.5, which increased by 125%. The TW of flexors improved from 56.6 to 124.9, and extensors improved from 75.8 to 165. The H/Q improved from 69% to 59%. The Lysholm score increased from 47 to 72.Conclusion: Isokinetic strength training of the flexor and extensor muscles on the leg improves motor function and stability of knee joint in the Chinese classical dancer.",
"fno": "218800a060",
"keywords": [
"Knee",
"Training",
"Legged Locomotion",
"Visualization",
"Torque",
"Muscles",
"Torque Measurement",
"Knee Injury",
"Isokinetic Strength Training",
"Chinese Classical Dancer",
"Peak Torque",
"Total Work",
"Quadriceps Ratios"
],
"authors": [
{
"affiliation": "Capital University,Physical Education and Sports,Beijing,China",
"fullName": "Yanjing Ren",
"givenName": "Yanjing",
"surname": "Ren",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Sport University,School of Sports Medicine and Rehabilitation,Beijing,China",
"fullName": "Tian Yue",
"givenName": "Tian",
"surname": "Yue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Sport University,Sports, Exercise and Brain Sciences Laboratory,Beijing,China",
"fullName": "Fengxue Qi",
"givenName": "Fengxue",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hbdss",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "60-64",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2188-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "218800a056",
"articleId": "1AqwRHCAE12",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "218800a065",
"articleId": "1AqwSzYM3n2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2014/5179/0/06850824",
"title": "EMG signals in muscular co-activations for dynamic analysis of knee joint",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850824/12OmNAYGlwD",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2014/5172/0/06897178",
"title": "Trajectory generation for myoelectrically controlled lower limb active knee exoskeleton",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2014/06897178/12OmNBDyA7R",
"parentPublication": {
"id": "proceedings/ic3/2014/5172/0",
"title": "2014 Seventh International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbmsys/1990/9040/0/00109424",
"title": "Reduction of interference in knee sound signals by adaptive filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cbmsys/1990/00109424/12OmNvlxJvJ",
"parentPublication": {
"id": "proceedings/cbmsys/1990/9040/0",
"title": "1990 Proceedings Third Annual IEEE Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eecs/2017/2085/0/2085a464",
"title": "The Improvement Effect of Muscular Activities and Foot Forces according to Sound Stimulus in Muscle Strength Imbalance during Gait",
"doi": null,
"abstractUrl": "/proceedings-article/eecs/2017/2085a464/12OmNwNwzMG",
"parentPublication": {
"id": "proceedings/eecs/2017/2085/0",
"title": "2017 European Conference on Electrical Engineering and Computer Science (EECS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bsn/2011/4431/0/05955293",
"title": "Observing Recovery from Knee-Replacement Surgery by Using Wearable Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/bsn/2011/05955293/12OmNz61dAm",
"parentPublication": {
"id": "proceedings/bsn/2011/4431/0",
"title": "Wearable and Implantable Body Sensor Networks, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfcse/2011/1562/0/06041689",
"title": "The Affection of Posture of Tai Chi Exercise to Internal Force and Muscular Torque of Knee and Ankle",
"doi": null,
"abstractUrl": "/proceedings-article/icfcse/2011/06041689/12OmNzahcdO",
"parentPublication": {
"id": "proceedings/icfcse/2011/1562/0",
"title": "2011 International Conference on Future Computer Science and Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a288",
"title": "A Bio-Inspired Musculoskeletal Model of the Lower Limb for Energy Economical Bipedal Walking",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a288/18M7ixxaQr6",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hbdss/2021/2188/0/218800a268",
"title": "Research on Lower Limb Motion Characteristics of Oblique Stride on “Euro Step” Layup Based on Intelligent Motion Capture System",
"doi": null,
"abstractUrl": "/proceedings-article/hbdss/2021/218800a268/1AqwSHB6zOo",
"parentPublication": {
"id": "proceedings/hbdss/2021/2188/0",
"title": "2021 International Conference on Health Big Data and Smart Sports (HBDSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismii/2021/1290/0/129000a070",
"title": "Research on Promotion of Lower Limb Movement Function Recovery after Stroke by Using Lower Limb Rehabilitation Robot in Combination with Constant Velocity Muscle Strength Training",
"doi": null,
"abstractUrl": "/proceedings-article/ismii/2021/129000a070/1sZ2L7YVSCs",
"parentPublication": {
"id": "proceedings/ismii/2021/1290/0",
"title": "2021 7th International Symposium on Mechatronics and Industrial Informatics (ISMII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tcs/2021/2910/0/291000a588",
"title": "The effect of 4 weeks Interactive Video Games training on the lower limb strength in the college students",
"doi": null,
"abstractUrl": "/proceedings-article/tcs/2021/291000a588/1wRIguKiLIc",
"parentPublication": {
"id": "proceedings/tcs/2021/2910/0",
"title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1rCg5NWvMis",
"title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"acronym": "icaice",
"groupId": "1840544",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rCgcpdOyhW",
"doi": "10.1109/ICAICE51518.2020.00017",
"title": "Control Strategy for Upper Limb Rehabilitation Robot Based on Muscle Strength Estimation",
"normalizedTitle": "Control Strategy for Upper Limb Rehabilitation Robot Based on Muscle Strength Estimation",
"abstract": "This paper proposes a control strategy for the active training mode of upper limb rehabilitation robot based on muscle strength estimation aiming at the control problem of active interaction of rehabilitation robots for patients with hemiplegia. Firstly, the sEMG signal is preprocessed by filtering and notch methods and extracted by three time-domain eigenvalues of root mean square value, absolute value mean value and variance after completing the collection of the surface electromyography (sEMG) and muscle strength signals of the upper limbs. Secondly, the muscle strength estimation model is evaluated by relative root mean square error and R-squared after proposing a muscle strength estimation algorithm based on Adaboost improved BP neural network (BPNN). Relative root means square error value and R-squared value decrease by 0.0340 and -0.4143 on average respectively compared to the BP model. The result shows that the effect of muscle strength estimation has been significantly improved. Lastly, the simulation of the patient's elbow flexion task shows the feasibility of the controller after introducing the patient's motion intention moment into the force impedance controller.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a control strategy for the active training mode of upper limb rehabilitation robot based on muscle strength estimation aiming at the control problem of active interaction of rehabilitation robots for patients with hemiplegia. Firstly, the sEMG signal is preprocessed by filtering and notch methods and extracted by three time-domain eigenvalues of root mean square value, absolute value mean value and variance after completing the collection of the surface electromyography (sEMG) and muscle strength signals of the upper limbs. Secondly, the muscle strength estimation model is evaluated by relative root mean square error and R-squared after proposing a muscle strength estimation algorithm based on Adaboost improved BP neural network (BPNN). Relative root means square error value and R-squared value decrease by 0.0340 and -0.4143 on average respectively compared to the BP model. The result shows that the effect of muscle strength estimation has been significantly improved. Lastly, the simulation of the patient's elbow flexion task shows the feasibility of the controller after introducing the patient's motion intention moment into the force impedance controller.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a control strategy for the active training mode of upper limb rehabilitation robot based on muscle strength estimation aiming at the control problem of active interaction of rehabilitation robots for patients with hemiplegia. Firstly, the sEMG signal is preprocessed by filtering and notch methods and extracted by three time-domain eigenvalues of root mean square value, absolute value mean value and variance after completing the collection of the surface electromyography (sEMG) and muscle strength signals of the upper limbs. Secondly, the muscle strength estimation model is evaluated by relative root mean square error and R-squared after proposing a muscle strength estimation algorithm based on Adaboost improved BP neural network (BPNN). Relative root means square error value and R-squared value decrease by 0.0340 and -0.4143 on average respectively compared to the BP model. The result shows that the effect of muscle strength estimation has been significantly improved. Lastly, the simulation of the patient's elbow flexion task shows the feasibility of the controller after introducing the patient's motion intention moment into the force impedance controller.",
"fno": "914600a054",
"keywords": [
"Backpropagation",
"Eigenvalues And Eigenfunctions",
"Electromyography",
"Learning Artificial Intelligence",
"Mean Square Error Methods",
"Medical Robotics",
"Medical Signal Processing",
"Patient Rehabilitation",
"Control Strategy",
"Upper Limb Rehabilitation Robot",
"Active Training Mode",
"Control Problem",
"R Squared Value",
"Absolute Value Mean Value",
"Muscle Strength Signals",
"Muscle Strength Estimation Model",
"Relative Root Mean Square Error",
"Muscle Strength Estimation Algorithm",
"Square Error Value",
"S EMG Signal",
"Adaboost Improved BP Neural Network",
"Patient Elbow Flexion Task",
"Training",
"Neural Networks",
"Estimation",
"Muscles",
"Rehabilitation Robotics",
"Root Mean Square",
"Time Domain Analysis",
"S EMG Signal",
"Upper Limb Rehabilitation Robot",
"Muscle Strength Estimation",
"Active Training Mode"
],
"authors": [
{
"affiliation": "Anhui University of Technology,School of Mechanical Engineering,Maanshan,China,243002",
"fullName": "Qingyun Liu",
"givenName": "Qingyun",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Anhui University of Technology,School of Mechanical Engineering,Maanshan,China,243002",
"fullName": "Mengxuan Zhang",
"givenName": "Mengxuan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Anhui University of Technology,School of Mechanical Engineering,Maanshan,China,243002",
"fullName": "Tao Liu",
"givenName": "Tao",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Anhui University of Technology,School of Mechanical Engineering,Maanshan,China,243002",
"fullName": "Chengchen Wang",
"givenName": "Chengchen",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icaice",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-10-01T00:00:00",
"pubType": "proceedings",
"pages": "54-60",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9146-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "914600a050",
"articleId": "1rCg7AfWBoY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "914600a061",
"articleId": "1rCg7q2Fwd2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccsit/2009/4519/0/05234929",
"title": "Analysis of EMG signal using wavelet coefficients for upper limb function",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2009/05234929/12OmNwlHSSF",
"parentPublication": {
"id": "proceedings/iccsit/2009/4519/0",
"title": "Computer Science and Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbec/2016/2132/0/07459037",
"title": "Design of Smart Portable Rehabilitation Exoskeletal Device for Upper Limb",
"doi": null,
"abstractUrl": "/proceedings-article/sbec/2016/07459037/12OmNxwnccb",
"parentPublication": {
"id": "proceedings/sbec/2016/2132/0",
"title": "2016 32nd Southern Biomedical Engineering Conference (SBEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2013/5159/0/06726376",
"title": "The Research and Application of sEMG in Massage Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2013/06726376/12OmNyrIawG",
"parentPublication": {
"id": "proceedings/msn/2013/5159/0",
"title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbeb/2012/4706/0/4706a917",
"title": "The Design of a Rehabilitation Training System with EMG Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/icbeb/2012/4706a917/12OmNzgwmQk",
"parentPublication": {
"id": "proceedings/icbeb/2012/4706/0",
"title": "Biomedical Engineering and Biotechnology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2018/7516/3/08549932",
"title": "EMG Feature Extractions for Upper-Limb Functional Movement During Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2018/08549932/17D45WHONrN",
"parentPublication": {
"id": "proceedings/iciibms/2018/7516/3",
"title": "2018 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichci/2021/0764/0/076400a231",
"title": "Evaluation of Upper Limb Comfort Based on Muscle Activation and Joint Angle in Human-Machine Interface",
"doi": null,
"abstractUrl": "/proceedings-article/ichci/2021/076400a231/1Bb0QKDD5a8",
"parentPublication": {
"id": "proceedings/ichci/2021/0764/0",
"title": "2021 2nd International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049714",
"title": "A Video-Based Augmented Reality System for Human-in-the-Loop Muscle Strength Assessment of Juvenile Dermatomyositis",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049714/1KYonwZBA08",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2020/9429/0/942900a095",
"title": "Detection of Muscle Fatigue by Fusion of Agonist and Synergistic Muscle sEMG Signals",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2020/942900a095/1mLMgzNcY7e",
"parentPublication": {
"id": "proceedings/cbms/2020/9429/0",
"title": "2020 IEEE 33rd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismii/2021/1290/0/129000a070",
"title": "Research on Promotion of Lower Limb Movement Function Recovery after Stroke by Using Lower Limb Rehabilitation Robot in Combination with Constant Velocity Muscle Strength Training",
"doi": null,
"abstractUrl": "/proceedings-article/ismii/2021/129000a070/1sZ2L7YVSCs",
"parentPublication": {
"id": "proceedings/ismii/2021/1290/0",
"title": "2021 7th International Symposium on Mechatronics and Industrial Informatics (ISMII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2021/6834/0/683400a621",
"title": "Telemetry Data Analysis on sEMG Major Muscle Groups in Three-step Leap over Stumps of 400m Obstacle",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2021/683400a621/1vmLLuO0STS",
"parentPublication": {
"id": "proceedings/icekim/2021/6834/0",
"title": "2021 2nd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1sZ2JrUFUVW",
"title": "2021 7th International Symposium on Mechatronics and Industrial Informatics (ISMII)",
"acronym": "ismii",
"groupId": "1841344",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1sZ2L7YVSCs",
"doi": "10.1109/ISMII52409.2021.00022",
"title": "Research on Promotion of Lower Limb Movement Function Recovery after Stroke by Using Lower Limb Rehabilitation Robot in Combination with Constant Velocity Muscle Strength Training",
"normalizedTitle": "Research on Promotion of Lower Limb Movement Function Recovery after Stroke by Using Lower Limb Rehabilitation Robot in Combination with Constant Velocity Muscle Strength Training",
"abstract": "In order to observe the correlation between lower limb rehabilitation robot and isokinetic muscle strength training after stroke on their lower limb motor function. We collected 90 stroke patients treated in our rehabilitation center from April 2018 to may 2020 and divided them into three groups by sampling method. Group A was assisted by lower limb rehabilitation robot training, group B was given isokinetic muscle strength training, and group C was assisted by lower limb rehabilitation robot coordinated isokinetic muscle strength training. Finally, we obtained the following results: compared with before intervention, FMA, Holden walking function classification, BBS and peak torque of three groups were significantly improved after 6 weeks of intervention (P <; 0.05), and BBS score and angular velocity (60, 120, 180 ° / s) of extensor and knee flexor in group C were significantly increased compared with group A and group B (P <; 0.05), and there was no significant difference between group A and group B (P > 0.05). In this paper, the effect of lower limb rehabilitation robot combined with isokinetic muscle strength training after stroke is ideal, which can effectively improve the motor function of lower limbs, and also promote the improvement of muscle strength, balance and walking ability, which is worthy of reference.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to observe the correlation between lower limb rehabilitation robot and isokinetic muscle strength training after stroke on their lower limb motor function. We collected 90 stroke patients treated in our rehabilitation center from April 2018 to may 2020 and divided them into three groups by sampling method. Group A was assisted by lower limb rehabilitation robot training, group B was given isokinetic muscle strength training, and group C was assisted by lower limb rehabilitation robot coordinated isokinetic muscle strength training. Finally, we obtained the following results: compared with before intervention, FMA, Holden walking function classification, BBS and peak torque of three groups were significantly improved after 6 weeks of intervention (P <; 0.05), and BBS score and angular velocity (60, 120, 180 ° / s) of extensor and knee flexor in group C were significantly increased compared with group A and group B (P <; 0.05), and there was no significant difference between group A and group B (P > 0.05). In this paper, the effect of lower limb rehabilitation robot combined with isokinetic muscle strength training after stroke is ideal, which can effectively improve the motor function of lower limbs, and also promote the improvement of muscle strength, balance and walking ability, which is worthy of reference.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to observe the correlation between lower limb rehabilitation robot and isokinetic muscle strength training after stroke on their lower limb motor function. We collected 90 stroke patients treated in our rehabilitation center from April 2018 to may 2020 and divided them into three groups by sampling method. Group A was assisted by lower limb rehabilitation robot training, group B was given isokinetic muscle strength training, and group C was assisted by lower limb rehabilitation robot coordinated isokinetic muscle strength training. Finally, we obtained the following results: compared with before intervention, FMA, Holden walking function classification, BBS and peak torque of three groups were significantly improved after 6 weeks of intervention (P 0.05). In this paper, the effect of lower limb rehabilitation robot combined with isokinetic muscle strength training after stroke is ideal, which can effectively improve the motor function of lower limbs, and also promote the improvement of muscle strength, balance and walking ability, which is worthy of reference.",
"fno": "129000a070",
"keywords": [
"Diseases",
"Gait Analysis",
"Medical Robotics",
"Muscle",
"Patient Rehabilitation",
"Patient Treatment",
"Lower Limb Movement Function Recovery",
"Stroke",
"Constant Velocity Muscle Strength Training",
"Isokinetic Muscle Strength Training",
"Lower Limb Motor Function",
"Lower Limb Rehabilitation Robot Training",
"Sampling Method",
"Holden Walking Function Classification",
"Angular Velocity",
"Knee Flexor",
"Balance",
"Walking",
"Time 6 0 Week",
"Training",
"Legged Locomotion",
"Knee",
"Torque",
"Mechatronics",
"Muscles",
"Rehabilitation Robotics",
"Post Stroke",
"Lower Limb Motor Function",
"FMA",
"Lower Limb Rehabilitation Robot",
"Isokinetic Muscle Strength Training"
],
"authors": [
{
"affiliation": "Malaya University,Kuala Lumpur,Malaysia",
"fullName": "Haowei Ma",
"givenName": "Haowei",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "70-73",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1290-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129000a065",
"articleId": "1sZ2M3T6pMs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129000a074",
"articleId": "1sZ2QFtysAU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciicii/2017/2434/0/2434a177",
"title": "Kinematics Analysis and Trajectory Planning of Upper Limb Rehabilitation Robot",
"doi": null,
"abstractUrl": "/proceedings-article/iciicii/2017/2434a177/12OmNA0MZ3G",
"parentPublication": {
"id": "proceedings/iciicii/2017/2434/0",
"title": "2017 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscsic/2017/2941/0/2941a092",
"title": "Tailor-Made Lower Limb Rehabilitation Platform",
"doi": null,
"abstractUrl": "/proceedings-article/iscsic/2017/2941a092/12OmNAWH9us",
"parentPublication": {
"id": "proceedings/iscsic/2017/2941/0",
"title": "2017 International Symposium on Computer Science and Intelligent Controls (ISCSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeae/2015/8328/0/07386205",
"title": "Sit-to-Stand Simulation for Torque Estimation on Lower Limb Joints",
"doi": null,
"abstractUrl": "/proceedings-article/icmeae/2015/07386205/12OmNAlvHAP",
"parentPublication": {
"id": "proceedings/icmeae/2015/8328/0",
"title": "2015 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbec/2016/2132/0/07459037",
"title": "Design of Smart Portable Rehabilitation Exoskeletal Device for Upper Limb",
"doi": null,
"abstractUrl": "/proceedings-article/sbec/2016/07459037/12OmNxwnccb",
"parentPublication": {
"id": "proceedings/sbec/2016/2132/0",
"title": "2016 32nd Southern Biomedical Engineering Conference (SBEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2018/7516/3/08549932",
"title": "EMG Feature Extractions for Upper-Limb Functional Movement During Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2018/08549932/17D45WHONrN",
"parentPublication": {
"id": "proceedings/iciibms/2018/7516/3",
"title": "2018 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a288",
"title": "A Bio-Inspired Musculoskeletal Model of the Lower Limb for Energy Economical Bipedal Walking",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a288/18M7ixxaQr6",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2019/2286/0/228600a507",
"title": "LOWER-LIMB FOLLOW-UP: A Surface Electromyography Based Serious Computer Game and Patient Follow-Up System for Lower Extremity Muscle Strengthening Exercises in Physiotherapy and Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2019/228600a507/1cdO1onj03K",
"parentPublication": {
"id": "proceedings/cbms/2019/2286/0",
"title": "2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icectt/2020/9928/0/992800a074",
"title": "Game Scene Construction for Lower Limb Rehabilitation Robot Based on Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icectt/2020/992800a074/1oa5enWMmL6",
"parentPublication": {
"id": "proceedings/icectt/2020/9928/0",
"title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2020/9146/0/914600a054",
"title": "Control Strategy for Upper Limb Rehabilitation Robot Based on Muscle Strength Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2020/914600a054/1rCgcpdOyhW",
"parentPublication": {
"id": "proceedings/icaice/2020/9146/0",
"title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeas/2020/9272/0/927200a254",
"title": "A Novel Design of the Lower Limb Rehabilitation Robot",
"doi": null,
"abstractUrl": "/proceedings-article/icmeas/2020/927200a254/1rsiEKu4zuM",
"parentPublication": {
"id": "proceedings/icmeas/2020/9272/0",
"title": "2020 6th International Conference on Mechanical Engineering and Automation Science (ICMEAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1vmLvNYq8QU",
"title": "2021 2nd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"acronym": "icekim",
"groupId": "1841184",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1vmLLuO0STS",
"doi": "10.1109/ICEKIM52309.2021.00140",
"title": "Telemetry Data Analysis on sEMG Major Muscle Groups in Three-step Leap over Stumps of 400m Obstacle",
"normalizedTitle": "Telemetry Data Analysis on sEMG Major Muscle Groups in Three-step Leap over Stumps of 400m Obstacle",
"abstract": "Objective: The purpose of the research is to study the sEMG characteristics of associated major muscle groups (agonist muscles) in three-step leap over stumps of 400m obstacle and the relationship between them and muscle performance. Methods: This research tests and analyzes the original electromyography, iEMG and iEMG% of seven male cadets who did quite well in 400m obstacle test by using the surface electromyography telemetry and anatomical analysis of the action, combining with sEMG synchronal video collecting data analysis. Results: The order of muscle power that functions in the right lower limb muscle groups and rectus abdominis is the following: tibialis anterior muscle and quadriceps generate power first, followed by rectus abdominis muscles, gastrocnemius, hamstrings, and gluteus maximus in sequence. The iEMG and iEMG% order from large to small is: gastrocnemius, tibialis anterior, quadriceps, hamstrings, rectus abdominis muscle, and gluteus maximus. Conclusion: The completion of three-step leap over stumps mainly depends on the muscle power of lower limbs and the arm strength of forward movement. Actively stretching and extending ankle and knee joints ensure the major muscle power to successfully push off the ground. In the movement of arms, pulling the thighs forward and tightening the knees also generate essential muscle power in the action. Therefore, during the training exercise, the muscle strength of triceps surae of the lower limbs, group before the calf muscles, quadriceps, and iliopsoas muscles should be greatly improved.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Objective: The purpose of the research is to study the sEMG characteristics of associated major muscle groups (agonist muscles) in three-step leap over stumps of 400m obstacle and the relationship between them and muscle performance. Methods: This research tests and analyzes the original electromyography, iEMG and iEMG% of seven male cadets who did quite well in 400m obstacle test by using the surface electromyography telemetry and anatomical analysis of the action, combining with sEMG synchronal video collecting data analysis. Results: The order of muscle power that functions in the right lower limb muscle groups and rectus abdominis is the following: tibialis anterior muscle and quadriceps generate power first, followed by rectus abdominis muscles, gastrocnemius, hamstrings, and gluteus maximus in sequence. The iEMG and iEMG% order from large to small is: gastrocnemius, tibialis anterior, quadriceps, hamstrings, rectus abdominis muscle, and gluteus maximus. Conclusion: The completion of three-step leap over stumps mainly depends on the muscle power of lower limbs and the arm strength of forward movement. Actively stretching and extending ankle and knee joints ensure the major muscle power to successfully push off the ground. In the movement of arms, pulling the thighs forward and tightening the knees also generate essential muscle power in the action. Therefore, during the training exercise, the muscle strength of triceps surae of the lower limbs, group before the calf muscles, quadriceps, and iliopsoas muscles should be greatly improved.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Objective: The purpose of the research is to study the sEMG characteristics of associated major muscle groups (agonist muscles) in three-step leap over stumps of 400m obstacle and the relationship between them and muscle performance. Methods: This research tests and analyzes the original electromyography, iEMG and iEMG% of seven male cadets who did quite well in 400m obstacle test by using the surface electromyography telemetry and anatomical analysis of the action, combining with sEMG synchronal video collecting data analysis. Results: The order of muscle power that functions in the right lower limb muscle groups and rectus abdominis is the following: tibialis anterior muscle and quadriceps generate power first, followed by rectus abdominis muscles, gastrocnemius, hamstrings, and gluteus maximus in sequence. The iEMG and iEMG% order from large to small is: gastrocnemius, tibialis anterior, quadriceps, hamstrings, rectus abdominis muscle, and gluteus maximus. Conclusion: The completion of three-step leap over stumps mainly depends on the muscle power of lower limbs and the arm strength of forward movement. Actively stretching and extending ankle and knee joints ensure the major muscle power to successfully push off the ground. In the movement of arms, pulling the thighs forward and tightening the knees also generate essential muscle power in the action. Therefore, during the training exercise, the muscle strength of triceps surae of the lower limbs, group before the calf muscles, quadriceps, and iliopsoas muscles should be greatly improved.",
"fno": "683400a621",
"keywords": [
"Biomedical Electrodes",
"Biomedical Telemetry",
"Data Analysis",
"Electromyography",
"Gait Analysis",
"Medical Signal Processing",
"Muscle Performance",
"Obstacle Test",
"Surface Electromyography Telemetry",
"S EMG Synchronal Video Collecting Data Analysis",
"Right Lower Limb Muscle Groups",
"Tibialis Anterior Muscle",
"Rectus Abdominis Muscles",
"Gluteus Maximus",
"Rectus Abdominis Muscle",
"Muscle Strength",
"Calf Muscles",
"Iliopsoas Muscles",
"Telemetry Data Analysis",
"S EMG Major Muscle Groups",
"S EMG Characteristics",
"Associated Major Muscle Groups",
"Agonist Muscles",
"Training",
"Data Analysis",
"Thigh",
"Muscles",
"Electromyography",
"Information Management",
"Telemetry",
"400 M Obstacle",
"Three Step Leap Over Stumps",
"Surface Electromyography Telemetry",
"Original Electromyography",
"I EMG",
"I EMG X 0025"
],
"authors": [
{
"affiliation": "Information Communication Institute, National University of Defense Technology,Xi'an,China",
"fullName": "Xiao-nan Wu",
"givenName": "Xiao-nan",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Information Communication Institute, National University of Defense Technology,Xi'an,China",
"fullName": "Mei Song",
"givenName": "Mei",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Information Communication Institute, National University of Defense Technology,Xi'an,China",
"fullName": "Bo Zhang",
"givenName": "Bo",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icekim",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "621-623",
"year": "2021",
"issn": null,
"isbn": "978-1-7281-6834-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "683400a616",
"articleId": "1vmLHrOlUUU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "683400a624",
"articleId": "1vmLIrElSOk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ipsn/2012/6469/0/06920973",
"title": "Poster abstract: MARS: A muscle activity recognition system using inertial sensors",
"doi": null,
"abstractUrl": "/proceedings-article/ipsn/2012/06920973/12OmNAm4TIH",
"parentPublication": {
"id": "proceedings/ipsn/2012/6469/0",
"title": "2012 ACM/IEEE 11th International Conference on Information Processing in Sensor Networks (IPSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2014/5179/0/06850827",
"title": "Analysis of fatigue conditions in triceps brachii muscle using sEMG signals and spectral correlation density function",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850827/12OmNvCi44j",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iacsit-sc/2009/3653/0/3653a501",
"title": "A Prediction Method of Muscle Force Using sEMG",
"doi": null,
"abstractUrl": "/proceedings-article/iacsit-sc/2009/3653a501/12OmNxX3uxA",
"parentPublication": {
"id": "proceedings/iacsit-sc/2009/3653/0",
"title": "Computer Science and Information Technology, International Association of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a605",
"title": "Preliminaly Study on Coordinated Movement Mechanism of Multiple Muscle Using Wavelet Coherence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a605/12OmNxymo4v",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hbdss/2021/2188/0/218800a086",
"title": "Telemetry Data Analysis on sEMG Major Muscle Groups in Hanging-up-and-passing-through Horizontal Ladder of 400m Obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/hbdss/2021/218800a086/1AqwRXXF1fi",
"parentPublication": {
"id": "proceedings/hbdss/2021/2188/0",
"title": "2021 International Conference on Health Big Data and Smart Sports (HBDSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hbdss/2021/2188/0/218800a268",
"title": "Research on Lower Limb Motion Characteristics of Oblique Stride on “Euro Step” Layup Based on Intelligent Motion Capture System",
"doi": null,
"abstractUrl": "/proceedings-article/hbdss/2021/218800a268/1AqwSHB6zOo",
"parentPublication": {
"id": "proceedings/hbdss/2021/2188/0",
"title": "2021 International Conference on Health Big Data and Smart Sports (HBDSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995037",
"title": "Selective ensemble learning for cross-muscle ALS disease identification with EMG signal",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995037/1JC21jrg47K",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/2/260702a380",
"title": "Viscoelasticity Measurements of knee Muscles with Simulated Knee Osteoarthritis Treated by Novel Chinese Medicine: A Preliminary Study",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260702a380/1cYiqXjmssE",
"parentPublication": {
"id": "compsac/2019/2607/2",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2019/2286/0/228600a507",
"title": "LOWER-LIMB FOLLOW-UP: A Surface Electromyography Based Serious Computer Game and Patient Follow-Up System for Lower Extremity Muscle Strengthening Exercises in Physiotherapy and Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2019/228600a507/1cdO1onj03K",
"parentPublication": {
"id": "proceedings/cbms/2019/2286/0",
"title": "2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2020/9429/0/942900a095",
"title": "Detection of Muscle Fatigue by Fusion of Agonist and Synergistic Muscle sEMG Signals",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2020/942900a095/1mLMgzNcY7e",
"parentPublication": {
"id": "proceedings/cbms/2020/9429/0",
"title": "2020 IEEE 33rd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1wRI8oLPD8s",
"title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)",
"acronym": "tcs",
"groupId": "1841564",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1wRIlE9zGne",
"doi": "10.1109/TCS52929.2021.00107",
"title": "Summary of core muscle strength and surface electromyography in patients with chronic nonspecific low back pain",
"normalizedTitle": "Summary of core muscle strength and surface electromyography in patients with chronic nonspecific low back pain",
"abstract": "In recent years, with the changes in people's lifestyles, the prevalence of non-specific low back pain is on the rise in China. Among them, middle-aged and elderly women, postpartum women, athletes and long-term sitting workers are the majority. If the patient does not receive effective treatment, it will not only seriously endanger personal health and cause unnecessary troubles to work and life, but also increase the medical burden and cause serious social and economic problems. At present, many scholars believe that the decline of lumbar spine stability and the changes in the biomechanical structure of the human body are one of the important mechanisms for the occurrence and development of chronic non-specific low back pain. Due to the special anatomical structure, the lumbar spine becomes the most vulnerable part of the spine. When the patient is in a state of poor posture for a long time, the nucleus and muscles attached to the lumbar spine, especially the transverse abdominis and multifidus muscles, will appear to varying degrees.\" \"Fatigue\", when \"fatigue\" reaches a certain level, \"disuse\" or even atrophy occurs, muscle strength will further decrease, and the stability of the lumbar spine will decrease, causing abnormalities between the lumbar muscles and nerve signal transmission, and eventually causing pain. Therefore, it is of great significance to study the relationship between non-specific low back pain, waist and abdominal muscle strength, electromyography and pain. This study studied the relationship between the waist and abdominal muscle strength, myoelectric performance and pain in patients with non-specific low back pain. The conclusions were drawn from the research results, and at the theoretical level, it provides a theoretical basis for the clinical treatment of non-specific low back pain. On the practical level, it is helpful to remind people to maintain lumbar spine stability and prevent chronic low back pain.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, with the changes in people's lifestyles, the prevalence of non-specific low back pain is on the rise in China. Among them, middle-aged and elderly women, postpartum women, athletes and long-term sitting workers are the majority. If the patient does not receive effective treatment, it will not only seriously endanger personal health and cause unnecessary troubles to work and life, but also increase the medical burden and cause serious social and economic problems. At present, many scholars believe that the decline of lumbar spine stability and the changes in the biomechanical structure of the human body are one of the important mechanisms for the occurrence and development of chronic non-specific low back pain. Due to the special anatomical structure, the lumbar spine becomes the most vulnerable part of the spine. When the patient is in a state of poor posture for a long time, the nucleus and muscles attached to the lumbar spine, especially the transverse abdominis and multifidus muscles, will appear to varying degrees.\" \"Fatigue\", when \"fatigue\" reaches a certain level, \"disuse\" or even atrophy occurs, muscle strength will further decrease, and the stability of the lumbar spine will decrease, causing abnormalities between the lumbar muscles and nerve signal transmission, and eventually causing pain. Therefore, it is of great significance to study the relationship between non-specific low back pain, waist and abdominal muscle strength, electromyography and pain. This study studied the relationship between the waist and abdominal muscle strength, myoelectric performance and pain in patients with non-specific low back pain. The conclusions were drawn from the research results, and at the theoretical level, it provides a theoretical basis for the clinical treatment of non-specific low back pain. On the practical level, it is helpful to remind people to maintain lumbar spine stability and prevent chronic low back pain.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, with the changes in people's lifestyles, the prevalence of non-specific low back pain is on the rise in China. Among them, middle-aged and elderly women, postpartum women, athletes and long-term sitting workers are the majority. If the patient does not receive effective treatment, it will not only seriously endanger personal health and cause unnecessary troubles to work and life, but also increase the medical burden and cause serious social and economic problems. At present, many scholars believe that the decline of lumbar spine stability and the changes in the biomechanical structure of the human body are one of the important mechanisms for the occurrence and development of chronic non-specific low back pain. Due to the special anatomical structure, the lumbar spine becomes the most vulnerable part of the spine. When the patient is in a state of poor posture for a long time, the nucleus and muscles attached to the lumbar spine, especially the transverse abdominis and multifidus muscles, will appear to varying degrees.\" \"Fatigue\", when \"fatigue\" reaches a certain level, \"disuse\" or even atrophy occurs, muscle strength will further decrease, and the stability of the lumbar spine will decrease, causing abnormalities between the lumbar muscles and nerve signal transmission, and eventually causing pain. Therefore, it is of great significance to study the relationship between non-specific low back pain, waist and abdominal muscle strength, electromyography and pain. This study studied the relationship between the waist and abdominal muscle strength, myoelectric performance and pain in patients with non-specific low back pain. The conclusions were drawn from the research results, and at the theoretical level, it provides a theoretical basis for the clinical treatment of non-specific low back pain. On the practical level, it is helpful to remind people to maintain lumbar spine stability and prevent chronic low back pain.",
"fno": "291000a502",
"keywords": [
"Biomechanics",
"Bone",
"Diseases",
"Electromyography",
"Ergonomics",
"Neurophysiology",
"Orthopaedics",
"Patient Treatment",
"Core Muscle Strength",
"Economic Problems",
"Lumbar Spine Stability",
"Chronic Nonspecific Low",
"Nucleus Muscles",
"Lumbar Muscles",
"Abdominal Muscle Strength",
"Pain",
"Spine",
"Senior Citizens",
"Muscles",
"Back",
"Fatigue",
"Electromyography",
"Nonspecific Low Back Pain",
"Core Muscle Strength",
"Surface Electromyography"
],
"authors": [
{
"affiliation": "Southwest Petroleum University,Institute of Physical Education,China",
"fullName": "Qinwen Guan",
"givenName": "Qinwen",
"surname": "Guan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Southwest Petroleum University,Institute of Physical Education,China",
"fullName": "Xiaoping Liu",
"givenName": "Xiaoping",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Southwest University,Institute of Physical Education,Chongqing,China",
"fullName": "Haowei Liu",
"givenName": "Haowei",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "tcs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "502-507",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2910-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "291000a494",
"articleId": "1wRIhtxiAOQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "291000a508",
"articleId": "1wRIcibo9So",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2014/5669/0/06999349",
"title": "Effect of Chinese bone-seting for chronic low back pain",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999349/12OmNASILXd",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344578",
"title": "Pain level recognition using kinematics and muscle activity for physical rehabilitation in chronic pain",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344578/12OmNrJiCRg",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2013/4963/0/4963a088",
"title": "Neural Network Based Spinal Age Estimation Using Lumbar Spine Magnetic Resonance Images (MRI)",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2013/4963a088/12OmNwuvrUP",
"parentPublication": {
"id": "proceedings/isms/2013/4963/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bsn/2010/4065/0/4065a009",
"title": "Quantitative Assessment of the Motion of the Lumbar Spine and Pelvis with Wearable Inertial Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/bsn/2010/4065a009/12OmNxdDFGE",
"parentPublication": {
"id": "proceedings/bsn/2010/4065/0",
"title": "Wearable and Implantable Body Sensor Networks, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2015/2612/0/07061044",
"title": "Continuous spine care service for elderly",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2015/07061044/12OmNyOHG6U",
"parentPublication": {
"id": "proceedings/icmu/2015/2612/0",
"title": "2015 Eighth International Conference on Mobile Computing and Ubiquitous Networking (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a760",
"title": "Study on Lumbar Spine Stresses during Manual Materials Handing Based on Finite Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a760/12OmNyugz4r",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2016/04/07173007",
"title": "The Automatic Detection of Chronic Pain-Related Expression: Requirements, Challenges and the Multimodal EmoPain Dataset",
"doi": null,
"abstractUrl": "/journal/ta/2016/04/07173007/13rRUx0getK",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2018/7516/3/08550022",
"title": "Effect of Shoes on Lower Extremity Pain and Low Back Pain During Prolonged Standing on a Sloping Medium",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2018/08550022/17D45WWzW6s",
"parentPublication": {
"id": "proceedings/iciibms/2018/7516/3",
"title": "2018 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2019/3918/0/391800a234",
"title": "The Study of Functional Magnetic Resonance for Chronic Low Back Pain",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2019/391800a234/1gRxm0aaNgc",
"parentPublication": {
"id": "proceedings/itme/2019/3918/0",
"title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a169",
"title": "Low Back Pain Attenuation Employing Virtual Reality Physiotherapy",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a169/1oZBzZUCp2M",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXGQKSUPm",
"doi": "10.1109/VRW52623.2021.00077",
"title": "Multisensory Teleportation in Virtual Reality Applications",
"normalizedTitle": "Multisensory Teleportation in Virtual Reality Applications",
"abstract": "This position paper aims to briefly summarise existing research in vection, teleportation and multisensory stimuli, present our cross-disciplinary research setup and argue towards the importance of discussing haptic feedback design in Virtual Reality (VR) locomotion techniques. In particular, haptic feedback stimulation has been shown to enhance the perception of self-motion when applied to various parts of the body. The recent developments of haptic devices opens the possibilities to explore \"whole-body\" haptics in virtual environments for locomotion techniques. We argue that crossmodal stimulation frameworks that have been already applied to study self-motion in VR could potentially provide benefits to locomotion studies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This position paper aims to briefly summarise existing research in vection, teleportation and multisensory stimuli, present our cross-disciplinary research setup and argue towards the importance of discussing haptic feedback design in Virtual Reality (VR) locomotion techniques. In particular, haptic feedback stimulation has been shown to enhance the perception of self-motion when applied to various parts of the body. The recent developments of haptic devices opens the possibilities to explore \"whole-body\" haptics in virtual environments for locomotion techniques. We argue that crossmodal stimulation frameworks that have been already applied to study self-motion in VR could potentially provide benefits to locomotion studies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This position paper aims to briefly summarise existing research in vection, teleportation and multisensory stimuli, present our cross-disciplinary research setup and argue towards the importance of discussing haptic feedback design in Virtual Reality (VR) locomotion techniques. In particular, haptic feedback stimulation has been shown to enhance the perception of self-motion when applied to various parts of the body. The recent developments of haptic devices opens the possibilities to explore \"whole-body\" haptics in virtual environments for locomotion techniques. We argue that crossmodal stimulation frameworks that have been already applied to study self-motion in VR could potentially provide benefits to locomotion studies.",
"fno": "405700a377",
"keywords": [
"Feedback",
"Haptic Interfaces",
"Virtual Reality",
"Multisensory Stimuli",
"Cross Disciplinary Research Setup",
"Haptic Feedback Design",
"Virtual Reality Locomotion",
"VR",
"Haptic Feedback Stimulation",
"Haptic Devices",
"Virtual Environments",
"Crossmodal Stimulation Frameworks",
"Locomotion Studies",
"Multisensory Teleportation",
"Virtual Reality",
"Three Dimensional Displays",
"Conferences",
"Virtual Environments",
"Teleportation",
"User Interfaces",
"Haptic Interfaces",
"Locomotion",
"Virtual Reality",
"Teleportation",
"Haptic",
"Multisensorial"
],
"authors": [
{
"affiliation": "Queen Mary University of London",
"fullName": "Francesco Soave",
"givenName": "Francesco",
"surname": "Soave",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Queen Mary University of London",
"fullName": "Ildar Farkhatdinov",
"givenName": "Ildar",
"surname": "Farkhatdinov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Queen Mary University of London",
"fullName": "Nick Bryan-Kinns",
"givenName": "Nick",
"surname": "Bryan-Kinns",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "377-379",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnXGMYqfo4",
"name": "pvrw202140570-09419121s1-mm_405700a377.zip",
"size": "2.33 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419121s1-mm_405700a377.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a375",
"articleId": "1tnXbVNaL9S",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a380",
"articleId": "1tnXc1raaxq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892335",
"title": "Designing intentional impossible spaces in virtual reality narratives: A case study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892335/12OmNApcu9b",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892233",
"title": "VRRobot: Robot actuated props in an infinite virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892233/12OmNwkhTh6",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446280",
"title": "Enhancing the Stiffness Perception of Tangible Objects in Mixed Reality Using Wearable Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446280/13bd1AIBM2a",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260856",
"title": "NotifiVR: Exploring Interruptions and Notifications in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260856/13rRUxNmPDW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a238",
"title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a530",
"title": "The Evaluation of Gait-Free Locomotion Methods with Eye Movement in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a530/1J7WtHqguHu",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798158",
"title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090536",
"title": "Elastic-Move: Passive Haptic Device with Force Feedback for Virtual Reality Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090536/1jIxqFQXvSE",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09143472",
"title": "Do Multisensory Stimuli Benefit the Virtual Reality Experience? A Systematic Review",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09143472/1lxmwwX05lC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a608",
"title": "Walking and Teleportation in Wide-area Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXjaZXiw0",
"doi": "10.1109/VRW52623.2021.00057",
"title": "In Touch with Everyday Objects: Teleportation Techniques in Virtual Environments Supporting Tangibility",
"normalizedTitle": "In Touch with Everyday Objects: Teleportation Techniques in Virtual Environments Supporting Tangibility",
"abstract": "The application of virtual reality (VR) for everyday use is often limited due to the lack of tactile and kinesthetic feedback. To facilitate and expand the use of VR in daily life, it is possible to employ physical objects readily available at home as tangible objects to provide this missing feedback. For instance, a real chair can allow a user to sit within the virtual environment, even if the sitting place in the virtual world is not a chair. In home-based games, a real but not dangerous stick can provide the holding sensation of a virtual sword. These tracked objects in the real world can serve as a tangibility medium to their virtual counterparts, contributing to a higher sense of presence and immersion. However, such a solution relies on a consistent spatial relationship between the real and virtual space surrounding the user, which makes the basic use of virtual navigation techniques such as teleportation difficult. To allow the navigation on a large virtual environment while supporting a tangible interaction with real objects in a limited physical space at home, this paper explores three different teleportation techniques: to teleport the user, the object, or both of them to a new position accordingly while preserving the user and object's spatial relationship. All of these approaches enable tangible interaction when using a teleportation technique for navigation, but each one is more or less suitable in different situations of real/virtual spatial consistency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The application of virtual reality (VR) for everyday use is often limited due to the lack of tactile and kinesthetic feedback. To facilitate and expand the use of VR in daily life, it is possible to employ physical objects readily available at home as tangible objects to provide this missing feedback. For instance, a real chair can allow a user to sit within the virtual environment, even if the sitting place in the virtual world is not a chair. In home-based games, a real but not dangerous stick can provide the holding sensation of a virtual sword. These tracked objects in the real world can serve as a tangibility medium to their virtual counterparts, contributing to a higher sense of presence and immersion. However, such a solution relies on a consistent spatial relationship between the real and virtual space surrounding the user, which makes the basic use of virtual navigation techniques such as teleportation difficult. To allow the navigation on a large virtual environment while supporting a tangible interaction with real objects in a limited physical space at home, this paper explores three different teleportation techniques: to teleport the user, the object, or both of them to a new position accordingly while preserving the user and object's spatial relationship. All of these approaches enable tangible interaction when using a teleportation technique for navigation, but each one is more or less suitable in different situations of real/virtual spatial consistency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The application of virtual reality (VR) for everyday use is often limited due to the lack of tactile and kinesthetic feedback. To facilitate and expand the use of VR in daily life, it is possible to employ physical objects readily available at home as tangible objects to provide this missing feedback. For instance, a real chair can allow a user to sit within the virtual environment, even if the sitting place in the virtual world is not a chair. In home-based games, a real but not dangerous stick can provide the holding sensation of a virtual sword. These tracked objects in the real world can serve as a tangibility medium to their virtual counterparts, contributing to a higher sense of presence and immersion. However, such a solution relies on a consistent spatial relationship between the real and virtual space surrounding the user, which makes the basic use of virtual navigation techniques such as teleportation difficult. To allow the navigation on a large virtual environment while supporting a tangible interaction with real objects in a limited physical space at home, this paper explores three different teleportation techniques: to teleport the user, the object, or both of them to a new position accordingly while preserving the user and object's spatial relationship. All of these approaches enable tangible interaction when using a teleportation technique for navigation, but each one is more or less suitable in different situations of real/virtual spatial consistency.",
"fno": "405700a278",
"keywords": [
"Computer Games",
"Haptic Interfaces",
"Human Computer Interaction",
"Virtual Reality",
"Tactile Feedback",
"Virtual Sword",
"Virtual Navigation",
"Tangible Interaction",
"Teleportation",
"Virtual Environments",
"Virtual Reality",
"Home Based Games",
"Three Dimensional Displays",
"Navigation",
"Shape",
"Conferences",
"Virtual Environments",
"Teleportation",
"User Interfaces",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Interaction Techniques"
],
"authors": [
{
"affiliation": "University Paris-Saclay,CNRS, LISN, VENISE team,Orsay,France",
"fullName": "Yiran Zhang",
"givenName": "Yiran",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Paris-Saclay,CNRS, LISN, VENISE team,Orsay,France",
"fullName": "Sy-Thanh Ho",
"givenName": "Sy-Thanh",
"surname": "Ho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Paris-Saclay,CNRS, LISN, VENISE team,Orsay,France",
"fullName": "Nicolas Ladévèze",
"givenName": "Nicolas",
"surname": "Ladévèze",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Paris-Saclay,CNRS, LISN, VENISE team,Orsay,France",
"fullName": "Huyen Nguyen",
"givenName": "Huyen",
"surname": "Nguyen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Paris-Saclay,CNRS, Inria, LISN,Orsay,France",
"fullName": "Cédric Fleury",
"givenName": "Cédric",
"surname": "Fleury",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Paris-Saclay,CNRS, LISN, VENISE team,Orsay,France",
"fullName": "Patrick Bourdot",
"givenName": "Patrick",
"surname": "Bourdot",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "278-283",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a271",
"articleId": "1tnXmA2qUlW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a284",
"articleId": "1tnXZIKSGAM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892386",
"title": "Travel in large-scale head-worn VR: Pre-oriented teleportation with WIMs and previews",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892386/12OmNzhELm6",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a674",
"title": "Virtual Workspace Positioning Techniques during Teleportation for Co-located Collaboration in Virtual Reality using HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a674/1CJbVNhPGSI",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a238",
"title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a317",
"title": "WriArm: Leveraging Wrist Movement to Design Wrist+Arm Based Teleportation in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a317/1JrRkBbpP1K",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049698",
"title": "Gaining the High Ground: Teleportation to Mid-Air Targets in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049698/1KYotugT0xW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a082",
"title": "WiM-Based Group Navigation for Collaborative Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a082/1KmFfzv6fWo",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797777",
"title": "Exploration of Large Omnidirectional Images in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797777/1cJ0JISlXDG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a377",
"title": "Multisensory Teleportation in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a377/1tnXGQKSUPm",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a480",
"title": "Analysis of Positional Tracking Space Usage when using Teleportation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a321",
"title": "Simultaneous Real Walking and Asymmetric Input in Virtual Reality with a Smartphone-based Hybrid Interface",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a321/1yeQEyk3fbO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJcAVYrJew",
"doi": "10.1109/VRW55335.2022.00122",
"title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"normalizedTitle": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"abstract": "Higher resolution is one of the main directions and drivers in the development of Virtual Reality (VR) Head-Mounted Displays (HMDs). However, given their associated higher cost, it is unclear the benefits of having higher resolution on user experience, especially in VR games. This research aims to investigate the effects of resolution in gameplay experience and simulator sickness for VR games. To this end, we designed an experiment to collect gameplay experience, simulator sickness (SS), and player performance data with a VR First-Person Shooter game. Our results indicate that 2K resolution is an important threshold for an enhanced gameplay experience without affecting performance and increasing SS levels.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Higher resolution is one of the main directions and drivers in the development of Virtual Reality (VR) Head-Mounted Displays (HMDs). However, given their associated higher cost, it is unclear the benefits of having higher resolution on user experience, especially in VR games. This research aims to investigate the effects of resolution in gameplay experience and simulator sickness for VR games. To this end, we designed an experiment to collect gameplay experience, simulator sickness (SS), and player performance data with a VR First-Person Shooter game. Our results indicate that 2K resolution is an important threshold for an enhanced gameplay experience without affecting performance and increasing SS levels.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Higher resolution is one of the main directions and drivers in the development of Virtual Reality (VR) Head-Mounted Displays (HMDs). However, given their associated higher cost, it is unclear the benefits of having higher resolution on user experience, especially in VR games. This research aims to investigate the effects of resolution in gameplay experience and simulator sickness for VR games. To this end, we designed an experiment to collect gameplay experience, simulator sickness (SS), and player performance data with a VR First-Person Shooter game. Our results indicate that 2K resolution is an important threshold for an enhanced gameplay experience without affecting performance and increasing SS levels.",
"fno": "840200a542",
"keywords": [
"Computer Games",
"Helmet Mounted Displays",
"Virtual Reality",
"Resolution Tradeoff",
"Simulator Sickness",
"Virtual Reality Games",
"Virtual Reality Head Mounted Displays",
"User Experience",
"Player Performance Data",
"Enhanced Gameplay Experience",
"VR First Person Shooter Game",
"HMD",
"Three Dimensional Displays",
"Head Mounted Displays",
"Costs",
"Conferences",
"Design Methodology",
"Games",
"Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI HCI Design And Evaluation Methods User Studies"
],
"authors": [
{
"affiliation": "Xi'an Jiaotong-Liverpool University,Department of Computing,Suzhou,China,215028",
"fullName": "Jialin Wang",
"givenName": "Jialin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University,Department of Computing,Suzhou,China,215028",
"fullName": "Rongkai Shi",
"givenName": "Rongkai",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University,Department of Computing,Suzhou,China,215028",
"fullName": "Zehui Xiao",
"givenName": "Zehui",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science and Technology, Shandong University,Qingdao,China,266237",
"fullName": "Xueying Qin",
"givenName": "Xueying",
"surname": "Qin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University,Department of Computing,Suzhou,China,215028",
"fullName": "Hai-Ning Liang",
"givenName": "Hai-Ning",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "542-543",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a540",
"articleId": "1CJe0tUBKU0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a544",
"articleId": "1CJcMmE19cY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446195",
"title": "Effects of Latency Jitter on Simulator Sickness in a Search Task",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446195/13bd1AIBM29",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798291",
"title": "Assessing Media QoE, Simulator Sickness and Presence for Omnidirectional Videos with Different Test Protocols",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798291/1cJ0GMB2sV2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798158",
"title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a247",
"title": "Real-Time Detection of Simulator Sickness in Virtual Reality Games Based on Players' Psychophysiological Data during Gameplay",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a247/1pBMj6Ryu9q",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a198",
"title": "Assessment of the Simulator Sickness Questionnaire for Omnidirectional Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a198/1tuB40QFm92",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pBMeBWXAZ2",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pBMj6Ryu9q",
"doi": "10.1109/ISMAR-Adjunct51615.2020.00071",
"title": "Real-Time Detection of Simulator Sickness in Virtual Reality Games Based on Players' Psychophysiological Data during Gameplay",
"normalizedTitle": "Real-Time Detection of Simulator Sickness in Virtual Reality Games Based on Players' Psychophysiological Data during Gameplay",
"abstract": "Virtual Reality (VR) technology has been proliferating in the last decade, especially in the last few years. However, Simulator Sickness (SS) still represents a significant problem for its wider adoption. Currently, the most common way to detect SS is using the Simulator Sickness Questionnaire (SSQ). SSQ is a subjective measurement and is inadequate for real-time applications such as VR games. This research aims to investigate how to use machine learning techniques to detect SS based on in-game characters' and users' physiological data during gameplay in VR games. To achieve this, we designed an experiment to collect such data with three types of games. We trained a Long Short-Term Memory neural network with the dataset eye-tracking and character movement data to detect SS in real-time. Our results indicate that, in VR games, our model is an accurate and efficient way to detect SS in real-time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) technology has been proliferating in the last decade, especially in the last few years. However, Simulator Sickness (SS) still represents a significant problem for its wider adoption. Currently, the most common way to detect SS is using the Simulator Sickness Questionnaire (SSQ). SSQ is a subjective measurement and is inadequate for real-time applications such as VR games. This research aims to investigate how to use machine learning techniques to detect SS based on in-game characters' and users' physiological data during gameplay in VR games. To achieve this, we designed an experiment to collect such data with three types of games. We trained a Long Short-Term Memory neural network with the dataset eye-tracking and character movement data to detect SS in real-time. Our results indicate that, in VR games, our model is an accurate and efficient way to detect SS in real-time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) technology has been proliferating in the last decade, especially in the last few years. However, Simulator Sickness (SS) still represents a significant problem for its wider adoption. Currently, the most common way to detect SS is using the Simulator Sickness Questionnaire (SSQ). SSQ is a subjective measurement and is inadequate for real-time applications such as VR games. This research aims to investigate how to use machine learning techniques to detect SS based on in-game characters' and users' physiological data during gameplay in VR games. To achieve this, we designed an experiment to collect such data with three types of games. We trained a Long Short-Term Memory neural network with the dataset eye-tracking and character movement data to detect SS in real-time. Our results indicate that, in VR games, our model is an accurate and efficient way to detect SS in real-time.",
"fno": "767500a247",
"keywords": [
"Computer Games",
"Learning Artificial Intelligence",
"Physiology",
"Psychology",
"Recurrent Neural Nets",
"Virtual Reality",
"Simulator Sickness Questionnaire",
"SSQ",
"Real Time Applications",
"VR Games",
"In Game Characters",
"Gameplay",
"Character Movement Data",
"Real Time Detection",
"Virtual Reality Games",
"Player Psychophysiological Data",
"Subjective Measurement",
"Machine Learning",
"Long Short Term Memory Neural Network",
"Eye Tracking",
"Solid Modeling",
"Analytical Models",
"Neural Networks",
"Games",
"Brain Modeling",
"Real Time Systems",
"Physiology",
"Virtual Reality",
"Gaming",
"Simulator Sickness",
"Machine Learning",
"EEG",
"Eye Tracking"
],
"authors": [
{
"affiliation": "Xi'an Jiaotong-Liverpool University",
"fullName": "Jialin Wang",
"givenName": "Jialin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University",
"fullName": "Hai-Ning Liang",
"givenName": "Hai-Ning",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University",
"fullName": "Diego Vilela Monteiro",
"givenName": "Diego Vilela",
"surname": "Monteiro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University",
"fullName": "Wenge Xu",
"givenName": "Wenge",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University",
"fullName": "Hao Chen",
"givenName": "Hao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong-Liverpool University",
"fullName": "Qiwen Chen",
"givenName": "Qiwen",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "247-248",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7675-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "767500a241",
"articleId": "1pBMgmkBHck",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "767500a249",
"articleId": "1pBMjRRAvtK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2002/1492/0/14920164",
"title": "Effects of Field of View on Presence, Enjoyment, Memory, and Simulator Sickness in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920164/12OmNvUsoqB",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a147",
"title": "The Impacts of Animated-Virtual Actors' Visual Complexity and Simulator Sickness in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a147/12OmNzvhvBh",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a542",
"title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798213",
"title": "VR Sickness Prediction for Navigation in Immersive Virtual Environments using a Deep Long Short Term Memory Model",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798213/1cJ0RYruJIA",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08798880",
"title": "Sick Moves! Motion Parameters as Indicators of Simulator Sickness",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a387",
"title": "Virtual reality sickness detection: an approach based on physiological signals and machine learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a387/1pysy0d2Nck",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412423",
"title": "VR Sickness Assessment with Perception Prior and Hybrid Temporal Features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412423/1tmiMP82mre",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a198",
"title": "Assessment of the Simulator Sickness Questionnaire for Omnidirectional Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a198/1tuB40QFm92",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAf9n910Q",
"doi": "10.1109/VR50410.2021.00096",
"title": "Influence of Interactivity and Social Environments on User Experience and Social Acceptability in Virtual Reality",
"normalizedTitle": "Influence of Interactivity and Social Environments on User Experience and Social Acceptability in Virtual Reality",
"abstract": "Nowadays, Virtual Reality (VR) technology can be potentially used everywhere through wearable head-mounted displays. Nevertheless, it is still uncommon to see VR devices used in public settings. In these contexts, unaware bystanders in the surroundings might influence the User Experience (UX) and create concerns about the social acceptability of this technology. The user acts in a Social Environment (SE), characterized by surrounding people's number, proximity, and behavior. Simultaneously, VR applications often require a different degree of interactivity concerning body movements and controllers interaction. In this paper, the influence of Social Environments, and degree of interactivity on User Experience and social acceptability is investigated. Four Social Environments were simulated employing 360° Videos, and two VR games developed with two levels of interactivity. Results showed a statistically significant influence of Social Environments on Overall UX as well as Public VR, Interaction, Isolation, Privacy and Safety acceptability, and of the degree of interactivity on Presence, Valence, Arousal, Overall UX, UX Hedonic quality, and Safety acceptability. Findings indicate that Social Environments and degree of interactivity should be taken into account while designing VR applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Nowadays, Virtual Reality (VR) technology can be potentially used everywhere through wearable head-mounted displays. Nevertheless, it is still uncommon to see VR devices used in public settings. In these contexts, unaware bystanders in the surroundings might influence the User Experience (UX) and create concerns about the social acceptability of this technology. The user acts in a Social Environment (SE), characterized by surrounding people's number, proximity, and behavior. Simultaneously, VR applications often require a different degree of interactivity concerning body movements and controllers interaction. In this paper, the influence of Social Environments, and degree of interactivity on User Experience and social acceptability is investigated. Four Social Environments were simulated employing 360° Videos, and two VR games developed with two levels of interactivity. Results showed a statistically significant influence of Social Environments on Overall UX as well as Public VR, Interaction, Isolation, Privacy and Safety acceptability, and of the degree of interactivity on Presence, Valence, Arousal, Overall UX, UX Hedonic quality, and Safety acceptability. Findings indicate that Social Environments and degree of interactivity should be taken into account while designing VR applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Nowadays, Virtual Reality (VR) technology can be potentially used everywhere through wearable head-mounted displays. Nevertheless, it is still uncommon to see VR devices used in public settings. In these contexts, unaware bystanders in the surroundings might influence the User Experience (UX) and create concerns about the social acceptability of this technology. The user acts in a Social Environment (SE), characterized by surrounding people's number, proximity, and behavior. Simultaneously, VR applications often require a different degree of interactivity concerning body movements and controllers interaction. In this paper, the influence of Social Environments, and degree of interactivity on User Experience and social acceptability is investigated. Four Social Environments were simulated employing 360° Videos, and two VR games developed with two levels of interactivity. Results showed a statistically significant influence of Social Environments on Overall UX as well as Public VR, Interaction, Isolation, Privacy and Safety acceptability, and of the degree of interactivity on Presence, Valence, Arousal, Overall UX, UX Hedonic quality, and Safety acceptability. Findings indicate that Social Environments and degree of interactivity should be taken into account while designing VR applications.",
"fno": "255600a695",
"keywords": [
"Computer Games",
"Helmet Mounted Displays",
"Virtual Reality",
"Social Environment",
"User Experience",
"Social Acceptability",
"Virtual Reality Technology",
"VR Applications",
"Interactivity Concerning Body Movements",
"Safety Acceptability",
"Controllers Interaction",
"Privacy",
"Three Dimensional Displays",
"Virtual Reality",
"Games",
"User Interfaces",
"Particle Measurements",
"User Experience",
"Virtual Reality",
"Social Acceptability",
"User Experience",
"Social Environments",
"Interactivity",
"360° Videos"
],
"authors": [
{
"affiliation": "TU Berlin",
"fullName": "Maurizio Vergari",
"givenName": "Maurizio",
"surname": "Vergari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Berlin",
"fullName": "Tanja Kojić",
"givenName": "Tanja",
"surname": "Kojić",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Politecnico di Milano",
"fullName": "Francesco Vona",
"givenName": "Francesco",
"surname": "Vona",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Politecnico di Milano",
"fullName": "Franca Garzotto",
"givenName": "Franca",
"surname": "Garzotto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Berlin & DFKI",
"fullName": "Sebastian Möller",
"givenName": "Sebastian",
"surname": "Möller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Berlin & DFKI",
"fullName": "Jan-Niklas Voigt-Antons",
"givenName": "Jan-Niklas",
"surname": "Voigt-Antons",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "695-704",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600a687",
"articleId": "1tuB6Ibu8j6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a705",
"articleId": "1tuB1hj1VhS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vs-games/2017/5812/0/08055811",
"title": "Learning mechanical engineering in a virtual workshop: A preliminary study on utilisability, utility and acceptability",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08055811/12OmNCcKQOB",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-seet/2022/9592/0/959200a146",
"title": "Integrating User Experience into Agile : An Experience Report on Lean UX and Scrum",
"doi": null,
"abstractUrl": "/proceedings-article/icse-seet/2022/959200a146/1EaOSmZQWK4",
"parentPublication": {
"id": "proceedings/icse-seet/2022/9592/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Software Engineering Education and Training (ICSE-SEET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a018",
"title": "Assessing the Effect of Interactivity Design In VR Based Second Language Learning Tool",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a018/1JrQSj43WV2",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a538",
"title": "CardsVR: A Two-Person VR Experience with Passive Haptic Feedback from a Deck of Playing Cards",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a538/1JrRaySJ7So",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042781",
"title": "Acceptability and Trustworthiness of Virtual Agents by Effects of Theory of Mind and Social Skills Training",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042781/1KOuXUSrhHa",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2018/0604/0/060400a001",
"title": "Prior Experience as an Influencer in the Momentary User Experience: An Assessment in Immersive Virtual Reality Game Context",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2018/060400a001/1cJ7zP2WN0c",
"parentPublication": {
"id": "proceedings/svr/2018/0604/0",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090522",
"title": "Gaze+Gesture Interface: Considering Social Acceptability",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090522/1jIxpZciOZy",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl-hcc/2020/6901/0/09127272",
"title": "Impact of Spatial Interface Traversal on Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vl-hcc/2020/09127272/1lvPZi9tk2c",
"parentPublication": {
"id": "proceedings/vl-hcc/2020/6901/0",
"title": "2020 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a147",
"title": "An Exploratory Study for Designing Social Experience of Watching VR Movies Based on Audience’s Voice Comments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a147/1pBMiVCpEGY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a437",
"title": "Focus Group on Social Virtual Reality in Social Virtual Reality: Effects on Emotion and Self-Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a437/1yeQD8KNChO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgohS",
"title": "2018 Nicograph International (NicoInt)",
"acronym": "nicoint",
"groupId": "1814784",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1gzWkRB",
"doi": "10.1109/NICOINT.2018.00023",
"title": "Development of Communication Tools for Informed Consent Using VR Technology",
"normalizedTitle": "Development of Communication Tools for Informed Consent Using VR Technology",
"abstract": "Since prostate cancer patients tend to be elderly, doctors may need more time to explain a consent to them, so that they may understand it and sign it. The most direct way to help facilitate this communication is through visualization, specifically using VR tools to describe visually to the patients their prostate, their medical condition as well as the necessary surgical procedures. Our goal is to provide a visualization technology tool which makes medically-related communication between doctors and patients easy and efficient.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Since prostate cancer patients tend to be elderly, doctors may need more time to explain a consent to them, so that they may understand it and sign it. The most direct way to help facilitate this communication is through visualization, specifically using VR tools to describe visually to the patients their prostate, their medical condition as well as the necessary surgical procedures. Our goal is to provide a visualization technology tool which makes medically-related communication between doctors and patients easy and efficient.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Since prostate cancer patients tend to be elderly, doctors may need more time to explain a consent to them, so that they may understand it and sign it. The most direct way to help facilitate this communication is through visualization, specifically using VR tools to describe visually to the patients their prostate, their medical condition as well as the necessary surgical procedures. Our goal is to provide a visualization technology tool which makes medically-related communication between doctors and patients easy and efficient.",
"fno": "690901a074",
"keywords": [
"Biological Organs",
"Cancer",
"Medical Administrative Data Processing",
"Medical Computing",
"Medical Information Systems",
"Surgery",
"Telemedicine",
"Virtual Reality",
"Necessary Surgical Procedures",
"Visualization Technology Tool",
"Doctors",
"Communication Tools",
"Informed Consent",
"VR Technology",
"Prostate Cancer Patients",
"VR Tools",
"Medical Condition",
"Tools",
"Surgery",
"Biopsy",
"Prostate Cancer",
"Visualization",
"Biomedical Imaging",
"Communication",
"Medical Field",
"Informed Consent",
"VR Tool"
],
"authors": [
{
"affiliation": null,
"fullName": "Reika Sato",
"givenName": "Reika",
"surname": "Sato",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhejun Liu",
"givenName": "Zhejun",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kenji Yoshida",
"givenName": "Kenji",
"surname": "Yoshida",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kenta Takayasu",
"givenName": "Kenta",
"surname": "Takayasu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "nicoint",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "74-77",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6909-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "690901a070",
"articleId": "13bd1eTtWYr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "690901a078",
"articleId": "13bd1fKQxsj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/asea/2014/7760/0/07023890",
"title": "A Design of Efficient Medical Information System to Enhance Health Behaviors After Radical Prostatectomy",
"doi": null,
"abstractUrl": "/proceedings-article/asea/2014/07023890/12OmNBrlPzE",
"parentPublication": {
"id": "proceedings/asea/2014/7760/0",
"title": "2014 7th International Conference on Advanced Software Engineering and Its Applications (ASEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1995/7117/0/71170094",
"title": "Prostate Ultrasound Image Analysis: Localization of Cancer Lesions to Assist Biopsy",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1995/71170094/12OmNC4wtti",
"parentPublication": {
"id": "proceedings/cbms/1995/7117/0",
"title": "Proceedings Eighth IEEE Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2004/2173/0/21730191",
"title": "Quantitation of Extra-Capsular Prostate Tissue from Reconstructed Tissue Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2004/21730191/12OmNvHoQpz",
"parentPublication": {
"id": "proceedings/bibe/2004/2173/0",
"title": "Fourth IEEE Symposium on Bioinformatics and Bioengineering (BIBE'04)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/niss/2009/3687/0/3687b063",
"title": "Applying Data Mining for Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/niss/2009/3687b063/12OmNzC5SNj",
"parentPublication": {
"id": "proceedings/niss/2009/3687/0",
"title": "2009 International Conference on New Trends in Information and Service Science (NISS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2018/5377/0/537701a380",
"title": "Mapping the Treatment Journey for Patients with Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2018/537701a380/12OmNzSQdji",
"parentPublication": {
"id": "proceedings/ichi/2018/5377/0",
"title": "2018 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2016/9036/0/9036a013",
"title": "Digital PI-RADS: Smartphone Sketches for Instant Knowledge Acquisition in Prostate Cancer Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2016/9036a013/12OmNzZEAoY",
"parentPublication": {
"id": "proceedings/cbms/2016/9036/0",
"title": "2016 IEEE 29th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2019/03/08329998",
"title": "Differentiating Prostate Cancer from Benign Prostatic Hyperplasia Using PSAD Based on Machine Learning: Single-Center Retrospective Study in China",
"doi": null,
"abstractUrl": "/journal/tb/2019/03/08329998/13rRUIIVlaY",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/03/mcg2015030044",
"title": "A Visual-Interactive System for Prostate Cancer Cohort Analysis",
"doi": null,
"abstractUrl": "/magazine/cg/2015/03/mcg2015030044/13rRUwInv6U",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539638",
"title": "PROACT: Iterative Design of a Patient-Centered Visualization for Effective Prostate Cancer Health Risk Communication",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539638/13rRUxYINfk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2020/2644/0/264400a017",
"title": "Visualization Co-Design with Prostate Cancer Survivors who have Limited Graph Literacy",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2020/264400a017/1yhFE7okzgk",
"parentPublication": {
"id": "proceedings/vahc/2020/2644/0",
"title": "2020 Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJeWej7kcg",
"doi": "10.1109/VRW55335.2022.00068",
"title": "Reading Social Media Marketing Messages as Simulated Self Within a Metaverse: An Analysis of Gaze and Social Media Engagement Behaviors within a Metaverse Platform",
"normalizedTitle": "Reading Social Media Marketing Messages as Simulated Self Within a Metaverse: An Analysis of Gaze and Social Media Engagement Behaviors within a Metaverse Platform",
"abstract": "The current paper discusses how individuals will process social media content within the metaverse world. Also, the current paper will propose an exploratory study that is designed to provide preliminary evidence regarding how individuals cognitively and emotionally process social media posts embedded in a metaverse platform where they experience the simulation of becoming their desired or positive “future self”. From data obtained from the gaze tracking and social media engagement metrics that measure users' simulated attentional and engagement behaviors, the author will examine to what extent the different temporal distances of virtual or simulated self (present vs near- vs far-future self) and the actual self (lowly vs highly conscientious self) interactively influences the durations of attention (duration of viewing the posts) to and the engagement (i.e., clicking “like” button for the posts) in different social media posts (e.g., news feeds, positive vs negative dog pictures in native adverts, a health marketing post) seen in the virtual computer screen within the virtual room. In general, it is expected that the father the temporal distance, the longer the duration of the attention to the social media posts. This effect can be moderated by the actual self-views. That is, for participants with negative self-views, the farther the simulated temporal distance, the shorter the duration of attention to the social media posts relevant for the positive self within the metaverse environment. The opposite results are expected to be observed from individuals with positive self-views: the farther the simulated temporal distance, the longer the duration of attention to the social media posts seen within the non-real or metaverse world. The analysis and the plan for the presentation at 2022 IEEE VR workshop is provided in this paper.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The current paper discusses how individuals will process social media content within the metaverse world. Also, the current paper will propose an exploratory study that is designed to provide preliminary evidence regarding how individuals cognitively and emotionally process social media posts embedded in a metaverse platform where they experience the simulation of becoming their desired or positive “future self”. From data obtained from the gaze tracking and social media engagement metrics that measure users' simulated attentional and engagement behaviors, the author will examine to what extent the different temporal distances of virtual or simulated self (present vs near- vs far-future self) and the actual self (lowly vs highly conscientious self) interactively influences the durations of attention (duration of viewing the posts) to and the engagement (i.e., clicking “like” button for the posts) in different social media posts (e.g., news feeds, positive vs negative dog pictures in native adverts, a health marketing post) seen in the virtual computer screen within the virtual room. In general, it is expected that the father the temporal distance, the longer the duration of the attention to the social media posts. This effect can be moderated by the actual self-views. That is, for participants with negative self-views, the farther the simulated temporal distance, the shorter the duration of attention to the social media posts relevant for the positive self within the metaverse environment. The opposite results are expected to be observed from individuals with positive self-views: the farther the simulated temporal distance, the longer the duration of attention to the social media posts seen within the non-real or metaverse world. The analysis and the plan for the presentation at 2022 IEEE VR workshop is provided in this paper.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The current paper discusses how individuals will process social media content within the metaverse world. Also, the current paper will propose an exploratory study that is designed to provide preliminary evidence regarding how individuals cognitively and emotionally process social media posts embedded in a metaverse platform where they experience the simulation of becoming their desired or positive “future self”. From data obtained from the gaze tracking and social media engagement metrics that measure users' simulated attentional and engagement behaviors, the author will examine to what extent the different temporal distances of virtual or simulated self (present vs near- vs far-future self) and the actual self (lowly vs highly conscientious self) interactively influences the durations of attention (duration of viewing the posts) to and the engagement (i.e., clicking “like” button for the posts) in different social media posts (e.g., news feeds, positive vs negative dog pictures in native adverts, a health marketing post) seen in the virtual computer screen within the virtual room. In general, it is expected that the father the temporal distance, the longer the duration of the attention to the social media posts. This effect can be moderated by the actual self-views. That is, for participants with negative self-views, the farther the simulated temporal distance, the shorter the duration of attention to the social media posts relevant for the positive self within the metaverse environment. The opposite results are expected to be observed from individuals with positive self-views: the farther the simulated temporal distance, the longer the duration of attention to the social media posts seen within the non-real or metaverse world. The analysis and the plan for the presentation at 2022 IEEE VR workshop is provided in this paper.",
"fno": "840200a301",
"keywords": [
"Social Networking Online",
"Virtual Reality",
"Social Media Content",
"Metaverse World",
"Metaverse Platform",
"Social Media Engagement Metrics",
"Social Media Marketing Messages",
"Social Media Engagement Behaviors",
"Simulated Temporal Distance",
"Social Media Posts",
"Measurement",
"Three Dimensional Displays",
"Social Networking Online",
"Conferences",
"Gaze Tracking",
"Dogs",
"User Interfaces",
"Metaverse",
"Information Processing",
"Time Travel",
"Identity"
],
"authors": [
{
"affiliation": "College of Business Northern Illinois University,Department of Marketing",
"fullName": "Yongwoog Andy Jeon",
"givenName": "Yongwoog Andy",
"surname": "Jeon",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "301-303",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a299",
"articleId": "1CJfbuK0Yfe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a304",
"articleId": "1CJetSxfyi4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/eitt/2021/2757/0/275700a257",
"title": "The Relationship between Social Media Self-efficacy and Learning Engagement of College Students",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2021/275700a257/1AFspbx1NKM",
"parentPublication": {
"id": "proceedings/eitt/2021/2757/0",
"title": "2021 Tenth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tps-isa/2021/1623/0/162300a281",
"title": "Metaverse: Security and Privacy Issues",
"doi": null,
"abstractUrl": "/proceedings-article/tps-isa/2021/162300a281/1CzeyJ59aNi",
"parentPublication": {
"id": "proceedings/tps-isa/2021/1623/0",
"title": "2021 Third IEEE International Conference on Trust, Privacy and Security in Intelligent Systems and Applications (TPS-ISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020677",
"title": "Predicting and Analyzing Privacy Settings and Categories for Posts on Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020677/1KfRB2eitA4",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2022/7532/0/753200a337",
"title": "Privacy-Preserving Social Media with a Disclosure",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2022/753200a337/1LAz1YbdrFu",
"parentPublication": {
"id": "proceedings/candarw/2022/7532/0",
"title": "2022 Tenth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2019/1689/0/168900a121",
"title": "Relevancy Classification of Multimodal Social Media Streams for Emergency Services",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2019/168900a121/1cdOrSTKmDS",
"parentPublication": {
"id": "proceedings/smartcomp/2019/1689/0",
"title": "2019 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2019/10/08848135",
"title": "Can Information Hiding in Social Media Posts Represent a Threat?",
"doi": null,
"abstractUrl": "/magazine/co/2019/10/08848135/1dAq0SIK9KE",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378092",
"title": "Utilizing Social Media for Identifying Drug Addiction and Recovery Intervention",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378092/1s647zmDSrm",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377758",
"title": "Forecasting People’s Action via Social Media Data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377758/1s64Iz2C1Tq",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2021/0019/0/09597431",
"title": "Detecting Inspiring Content on Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2021/09597431/1yylfqgb0Xe",
"parentPublication": {
"id": "proceedings/acii/2021/0019/0",
"title": "2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0/357400b546",
"title": "MediNER: Understanding Diabetes Management Strategies Based on Social Media Discourse",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2021/357400b546/1zxL7VvJNHG",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0",
"title": "2021 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H445ZQ8vok",
"title": "2022 IEEE/ACIS 7th International Conference on Big Data, Cloud Computing, and Data Science (BCD)",
"acronym": "bcd",
"groupId": "1828424",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H4470uF4re",
"doi": "10.1109/BCD54882.2022.9900629",
"title": "Metaverse Learning: The Relationship among Quality of VR-Based Education, Self-Determination, and Learner Satisfaction",
"normalizedTitle": "Metaverse Learning: The Relationship among Quality of VR-Based Education, Self-Determination, and Learner Satisfaction",
"abstract": "While online learning had traditionally been implemented to aid in-person education, it has recently evolved into a critical tool for remote education. In particular, the advent of the COVID-19 pandemic has accelerated the spread of online learning and its significance. However, this wave of innovation in the education field has revealed the lack of research on how online education should be systematically provided and which educational aspects should be considered to enhance learner satisfaction in online settings. With the recent emergence of the Metaverse, VR education is once again proposed as a major tool for online learning despite existing limitations in research which put both systematic and educational aspects into consideration. Hence, this study presents and discusses a research model that converges technology acceptance model, information systems success model, and self-determination theory with the purpose of exploring variables that affect learner satisfaction, mediating the flow theory. Results showed that most variables of both self-determination theory and information system quality within VR education impact learner satisfaction. In particular, learners’ flow – complete immersion in learning – functions as an important mediating variable for learner satisfaction. These findings suggest designing and running a systematic platform that reflects self-directed learning is imperative to bring the best educational practices into the Metaverse.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While online learning had traditionally been implemented to aid in-person education, it has recently evolved into a critical tool for remote education. In particular, the advent of the COVID-19 pandemic has accelerated the spread of online learning and its significance. However, this wave of innovation in the education field has revealed the lack of research on how online education should be systematically provided and which educational aspects should be considered to enhance learner satisfaction in online settings. With the recent emergence of the Metaverse, VR education is once again proposed as a major tool for online learning despite existing limitations in research which put both systematic and educational aspects into consideration. Hence, this study presents and discusses a research model that converges technology acceptance model, information systems success model, and self-determination theory with the purpose of exploring variables that affect learner satisfaction, mediating the flow theory. Results showed that most variables of both self-determination theory and information system quality within VR education impact learner satisfaction. In particular, learners’ flow – complete immersion in learning – functions as an important mediating variable for learner satisfaction. These findings suggest designing and running a systematic platform that reflects self-directed learning is imperative to bring the best educational practices into the Metaverse.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While online learning had traditionally been implemented to aid in-person education, it has recently evolved into a critical tool for remote education. In particular, the advent of the COVID-19 pandemic has accelerated the spread of online learning and its significance. However, this wave of innovation in the education field has revealed the lack of research on how online education should be systematically provided and which educational aspects should be considered to enhance learner satisfaction in online settings. With the recent emergence of the Metaverse, VR education is once again proposed as a major tool for online learning despite existing limitations in research which put both systematic and educational aspects into consideration. Hence, this study presents and discusses a research model that converges technology acceptance model, information systems success model, and self-determination theory with the purpose of exploring variables that affect learner satisfaction, mediating the flow theory. Results showed that most variables of both self-determination theory and information system quality within VR education impact learner satisfaction. In particular, learners’ flow – complete immersion in learning – functions as an important mediating variable for learner satisfaction. These findings suggest designing and running a systematic platform that reflects self-directed learning is imperative to bring the best educational practices into the Metaverse.",
"fno": "09900629",
"keywords": [
"Computer Aided Instruction",
"Information Systems",
"Technology Acceptance Model",
"Virtual Reality",
"Educational Practices",
"Metaverse Learning",
"Online Learning",
"In Person Education",
"Critical Tool",
"Remote Education",
"COVID 19 Pandemic",
"Education Field",
"Online Education",
"Educational Aspects",
"Online Settings",
"Research Model",
"Technology Acceptance Model",
"Information Systems Success Model",
"Self Determination Theory",
"Information System Quality",
"VR Based Education Quality",
"Learner Satisfaction",
"Performance Evaluation",
"Technological Innovation",
"Technology Acceptance Model",
"Systematics",
"Metaverse",
"Computational Modeling",
"Education",
"Virtual Reality",
"Metaverse",
"Self Determination Theory",
"Flow",
"Technology Acceptance Model",
"Information Systems Success Model"
],
"authors": [
{
"affiliation": "Next Generation R&D Technology Policy Institute, Soongsil University,Seoul,Korea",
"fullName": "Gina Gim",
"givenName": "Gina",
"surname": "Gim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Soongsil University,Programs in Project Management,Seoul,Korea",
"fullName": "Hoikyoung Bae",
"givenName": "Hoikyoung",
"surname": "Bae",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Soongsil University,Programs in Project Management,Seoul,Korea",
"fullName": "Seona Kang",
"givenName": "Seona",
"surname": "Kang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bcd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "279-284",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6582-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09900793",
"articleId": "1H44ctiCpdC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09900618",
"articleId": "1H44gi4USkg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acie/2022/7973/0/797300a048",
"title": "Digital Media and VR Art Creation for Metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/acie/2022/797300a048/1FiydByZ4ti",
"parentPublication": {
"id": "proceedings/acie/2022/7973/0",
"title": "2022 2nd Asia Conference on Information Engineering (ACIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a090",
"title": "Innovation and Prospect of Digital Scene Setting in the Context of Metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a090/1H2phdYgv4I",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcd/2022/6582/0/09900579",
"title": "Metaverse Current Status and Prospects: Focusing on Metaverse Field Cases",
"doi": null,
"abstractUrl": "/proceedings-article/bcd/2022/09900579/1H44caaoIE0",
"parentPublication": {
"id": "proceedings/bcd/2022/6582/0",
"title": "2022 IEEE/ACIS 7th International Conference on Big Data, Cloud Computing, and Data Science (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcd/2022/6582/0/09900871",
"title": "A Conceptual Framework for Determining Metaverse Adoption in Vietnam IT Enterprises",
"doi": null,
"abstractUrl": "/proceedings-article/bcd/2022/09900871/1H44dvFS9aM",
"parentPublication": {
"id": "proceedings/bcd/2022/6582/0",
"title": "2022 IEEE/ACIS 7th International Conference on Big Data, Cloud Computing, and Data Science (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2022/06/10017411",
"title": "The Metaverse and Higher Education Institutions",
"doi": null,
"abstractUrl": "/magazine/it/2022/06/10017411/1JYZE6WWG3e",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10021004",
"title": "Metaverse in Education: Vision, Opportunities, and Challenges",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10021004/1KfT4Lj5RDi",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a592",
"title": "Research on Teaching of Metaverse Technology Flipped the MICE Education",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a592/1KpBrh7ycuc",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2023/5719/0/10074583",
"title": "Metaverse-based education service adoption and preference study using conjoint analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2023/10074583/1LKwIQCvo2Y",
"parentPublication": {
"id": "proceedings/icnc/2023/5719/0",
"title": "2023 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2022/1015/0/101500a104",
"title": "Metaverse Teaching Overview",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2022/101500a104/1M4rxOG2Buo",
"parentPublication": {
"id": "proceedings/itme/2022/1015/0",
"title": "2022 12th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2022/1015/0/101500a714",
"title": "Educational Metaverse Dilemmas and Solutions: a stakeholder-based perspective",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2022/101500a714/1M4rxqxzlKg",
"parentPublication": {
"id": "proceedings/itme/2022/1015/0",
"title": "2022 12th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H445ZQ8vok",
"title": "2022 IEEE/ACIS 7th International Conference on Big Data, Cloud Computing, and Data Science (BCD)",
"acronym": "bcd",
"groupId": "1828424",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H44ctiCpdC",
"doi": "10.1109/BCD54882.2022.9900793",
"title": "Developing Songjeong Metaverse Surfing Village: Development of Metaverse-based Platform Specialized for Marine Tourism",
"normalizedTitle": "Developing Songjeong Metaverse Surfing Village: Development of Metaverse-based Platform Specialized for Marine Tourism",
"abstract": "This study aims to introduce and explain how to develop a metaverse based immersive and experiential tourist contents. The project plans to introduce long-term and highly scalable new concept platform by developing virtual metaverse surfing village centered on Song-Jeong beach which is popular surfing destination in South Korea. The project consists of the following 5 tasks: The first Task is to develop Metaverse-based hyper-immersive interactive content which include implementation of surfing village virtualization by identifying an analyzing the components through spatial analysis. Second Task is developing VR/AR based experiential content. In this task, AI based service contents that can provide optimized surfing posture and correction service and virtual surfing experience contents will be developed. The third task is developing contents that provide integrated information linked to local tourism and leisure infrastructure. To do that AI based smart tourism service will be developed. The fourth task is to develop business model for interactive metaverse platform. Lastly, the fifth task is demonstration stage of the platform. This task includes promotion of demonstration platform, operating expert advisory group, and establishment of longterm expansion plan.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study aims to introduce and explain how to develop a metaverse based immersive and experiential tourist contents. The project plans to introduce long-term and highly scalable new concept platform by developing virtual metaverse surfing village centered on Song-Jeong beach which is popular surfing destination in South Korea. The project consists of the following 5 tasks: The first Task is to develop Metaverse-based hyper-immersive interactive content which include implementation of surfing village virtualization by identifying an analyzing the components through spatial analysis. Second Task is developing VR/AR based experiential content. In this task, AI based service contents that can provide optimized surfing posture and correction service and virtual surfing experience contents will be developed. The third task is developing contents that provide integrated information linked to local tourism and leisure infrastructure. To do that AI based smart tourism service will be developed. The fourth task is to develop business model for interactive metaverse platform. Lastly, the fifth task is demonstration stage of the platform. This task includes promotion of demonstration platform, operating expert advisory group, and establishment of longterm expansion plan.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study aims to introduce and explain how to develop a metaverse based immersive and experiential tourist contents. The project plans to introduce long-term and highly scalable new concept platform by developing virtual metaverse surfing village centered on Song-Jeong beach which is popular surfing destination in South Korea. The project consists of the following 5 tasks: The first Task is to develop Metaverse-based hyper-immersive interactive content which include implementation of surfing village virtualization by identifying an analyzing the components through spatial analysis. Second Task is developing VR/AR based experiential content. In this task, AI based service contents that can provide optimized surfing posture and correction service and virtual surfing experience contents will be developed. The third task is developing contents that provide integrated information linked to local tourism and leisure infrastructure. To do that AI based smart tourism service will be developed. The fourth task is to develop business model for interactive metaverse platform. Lastly, the fifth task is demonstration stage of the platform. This task includes promotion of demonstration platform, operating expert advisory group, and establishment of longterm expansion plan.",
"fno": "09900793",
"keywords": [
"Internet",
"Mobile Computing",
"Travel Industry",
"Ubiquitous Computing",
"Virtual Reality",
"Songjeong Metaverse Surfing Village",
"Metaverse Based Platform Specialized",
"Marine Tourism",
"Immersive Tourist Contents",
"Experiential Tourist Contents",
"Highly Scalable New Concept Platform",
"Virtual Metaverse Surfing Village",
"Song Jeong Beach",
"Popular Surfing Destination",
"South Korea",
"Hyper Immersive Interactive Content",
"Village Virtualization",
"Experiential Content",
"AI",
"Service Contents",
"Optimized Surfing Posture",
"Correction Service",
"Virtual Surfing Experience Contents",
"Local Tourism",
"Leisure Infrastructure",
"Smart Tourism Service",
"Fourth Task",
"Interactive Metaverse Platform",
"Fifth Task",
"Demonstration Platform",
"Road Transportation",
"Observatories",
"Metaverse",
"Urban Areas",
"Tourism Industry",
"Space Exploration",
"Task Analysis",
"Metaverse",
"Surfing",
"Marine Tourism"
],
"authors": [
{
"affiliation": "Dong-Eui University,Dept. of Hotel & Convention Management,Busan,South Korea",
"fullName": "Tae Hwan Yoon",
"givenName": "Tae Hwan",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dong-Eui University,Graduate School of Engineering,Dept. of Artificial Intelligence,Busan,South Korea",
"fullName": "Jong Kyu Do",
"givenName": "Jong Kyu",
"surname": "Do",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dong-Eui University,College of Commerce and Economics & AI Grand ICT Research Center,Dept. of e-Business,Busan,South Korea",
"fullName": "Seok Chan Jeong",
"givenName": "Seok Chan",
"surname": "Jeong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bcd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "276-278",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6582-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09900693",
"articleId": "1H449Y5rruo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09900629",
"articleId": "1H4470uF4re",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2012/4814/0/4814a296",
"title": "Further Dimensions: Text, Typography and Play in the Metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a296/12OmNCf1Dls",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcd/2022/6582/0/09900806",
"title": "Beauty Industry's Strategic Response to Metaverse Evolution: Focused on Generation MZ",
"doi": null,
"abstractUrl": "/proceedings-article/bcd/2022/09900806/1H44grhsBLW",
"parentPublication": {
"id": "proceedings/bcd/2022/6582/0",
"title": "2022 IEEE/ACIS 7th International Conference on Big Data, Cloud Computing, and Data Science (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2022/7180/0/718000a620",
"title": "Blockchain-based Edge Resource Sharing for Metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2022/718000a620/1JeE1ga6Eb6",
"parentPublication": {
"id": "proceedings/mass/2022/7180/0",
"title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a592",
"title": "Research on Teaching of Metaverse Technology Flipped the MICE Education",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a592/1KpBrh7ycuc",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2022/6457/0/645700a841",
"title": "MetaSpeech: Speech Effects Switch Along with Environment for Metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2022/645700a841/1LUtJIYBE2c",
"parentPublication": {
"id": "proceedings/msn/2022/6457/0",
"title": "2022 18th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1IFJxbFw9Vu",
"title": "2022 IEEE 42nd International Conference on Distributed Computing Systems Workshops (ICDCSW)",
"acronym": "icdcsw",
"groupId": "9951324",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IFJFhQc360",
"doi": "10.1109/ICDCSW56584.2022.00053",
"title": "Re-shaping Post-COVID-19 Teaching and Learning: A Blueprint of Virtual-Physical Blended Classrooms in the Metaverse Era",
"normalizedTitle": "Re-shaping Post-COVID-19 Teaching and Learning: A Blueprint of Virtual-Physical Blended Classrooms in the Metaverse Era",
"abstract": "During the COVID-19 pandemic, most countries have experienced some form of remote education through video conferencing software platforms. However, these software platforms fail to reduce immersion and replicate the classroom experience. The currently emerging Metaverse addresses many of such limitations by offering blended physical-digital environments. This paper aims to assess how the Metaverse can support and improve e-learning. We first survey the latest applications of blended environments in education and highlight the primary challenges and opportunities. Accordingly, we derive our proposal for a virtual-physical blended classroom configuration that brings students and teachers into a shared educational Metaverse. We focus on the system architecture of the Metaverse classroom to achieve real-time synchronization of a large number of participants and activities across physical (mixed reality classrooms) and virtual (remote VR platform) learning spaces. Our proposal attempts to transform the traditional physical classroom into virtual-physical cyberspace as a new social network of learners and educators connected at an unprecedented scale.",
"abstracts": [
{
"abstractType": "Regular",
"content": "During the COVID-19 pandemic, most countries have experienced some form of remote education through video conferencing software platforms. However, these software platforms fail to reduce immersion and replicate the classroom experience. The currently emerging Metaverse addresses many of such limitations by offering blended physical-digital environments. This paper aims to assess how the Metaverse can support and improve e-learning. We first survey the latest applications of blended environments in education and highlight the primary challenges and opportunities. Accordingly, we derive our proposal for a virtual-physical blended classroom configuration that brings students and teachers into a shared educational Metaverse. We focus on the system architecture of the Metaverse classroom to achieve real-time synchronization of a large number of participants and activities across physical (mixed reality classrooms) and virtual (remote VR platform) learning spaces. Our proposal attempts to transform the traditional physical classroom into virtual-physical cyberspace as a new social network of learners and educators connected at an unprecedented scale.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "During the COVID-19 pandemic, most countries have experienced some form of remote education through video conferencing software platforms. However, these software platforms fail to reduce immersion and replicate the classroom experience. The currently emerging Metaverse addresses many of such limitations by offering blended physical-digital environments. This paper aims to assess how the Metaverse can support and improve e-learning. We first survey the latest applications of blended environments in education and highlight the primary challenges and opportunities. Accordingly, we derive our proposal for a virtual-physical blended classroom configuration that brings students and teachers into a shared educational Metaverse. We focus on the system architecture of the Metaverse classroom to achieve real-time synchronization of a large number of participants and activities across physical (mixed reality classrooms) and virtual (remote VR platform) learning spaces. Our proposal attempts to transform the traditional physical classroom into virtual-physical cyberspace as a new social network of learners and educators connected at an unprecedented scale.",
"fno": "887900a241",
"keywords": [
"Computer Aided Instruction",
"Distance Learning",
"Teaching",
"Teleconferencing",
"Virtual Reality",
"Blended Environments",
"Classroom Experience",
"COVID 19 Pandemic",
"Currently Emerging Metaverse Addresses",
"Latest Applications",
"Metaverse Classroom",
"Metaverse Era",
"Mixed Reality Classrooms",
"Physical Digital Environments",
"Post COVID 19 Teaching",
"Primary Challenges",
"Remote Education",
"Remote VR Platform",
"Shared Educational Metaverse",
"Traditional Physical Classroom",
"Video Conferencing Software Platforms",
"Virtual Learning Spaces",
"Virtual Physical Blended Classroom Configuration",
"Virtual Physical Blended Classrooms",
"Virtual Physical Cyberspace",
"Metaverse",
"Pandemics",
"System Performance",
"Education",
"Systems Architecture",
"Transforms",
"Software",
"Human Computer Interaction",
"VR AR",
"Distance Learning",
"Metaverse",
"E Learning"
],
"authors": [
{
"affiliation": "Hong Kong University of Science and Technology (Guangzhou) Hong Kong University of Science and Technology",
"fullName": "Yuyang Wang",
"givenName": "Yuyang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea Advanced Institute of Science and Technology",
"fullName": "Lik-Hang Lee",
"givenName": "Lik-Hang",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hong Kong University of Science and Technology",
"fullName": "Tristan Braud",
"givenName": "Tristan",
"surname": "Braud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hong Kong University of Science and Technology (Guangzhou) Hong Kong University of Science and Technology",
"fullName": "Pan Hui",
"givenName": "Pan",
"surname": "Hui",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdcsw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "241-247",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8879-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "887900a237",
"articleId": "1IFJBSb6Zzi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "887900a248",
"articleId": "1IFJzSeVVmM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/oj/2022/01/09815155",
"title": "Fusing Blockchain and AI With Metaverse: A Survey",
"doi": null,
"abstractUrl": "/journal/oj/2022/01/09815155/1EJBce8LdBe",
"parentPublication": {
"id": "trans/oj",
"title": "IEEE Open Journal of the Computer Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2022/8810/0/881000a401",
"title": "Connecting Everyday Objects with the Metaverse: A Unified Recognition Framework",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2022/881000a401/1FJ5lIJDqYU",
"parentPublication": {
"id": "proceedings/compsac/2022/8810/0",
"title": "2022 IEEE 46th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a055",
"title": "The Digital Big Bang in the Metaverse Era",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a055/1J7WdsYCPEQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a027",
"title": "Towards a virtual business ecosystem in the Metaverse Era",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a027/1J7WujOWeCA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2022/06/10018009",
"title": "iMetaverseKG: Industrial Metaverse Knowledge Graph to Promote Interoperability in Design and Engineering Applications",
"doi": null,
"abstractUrl": "/magazine/ic/2022/06/10018009/1JYZ2E4yUZG",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2022/06/10017431",
"title": "Virtual Dimension—A Primer to Metaverse",
"doi": null,
"abstractUrl": "/magazine/it/2022/06/10017431/1JYZF1FB6ww",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10021004",
"title": "Metaverse in Education: Vision, Opportunities, and Challenges",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10021004/1KfT4Lj5RDi",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2023/5719/0/10074583",
"title": "Metaverse-based education service adoption and preference study using conjoint analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2023/10074583/1LKwIQCvo2Y",
"parentPublication": {
"id": "proceedings/icnc/2023/5719/0",
"title": "2023 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tps-isa/2022/7408/0/740800a039",
"title": "Auditing Metaverse Requires Multimodal Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/tps-isa/2022/740800a039/1Lxf665eb2U",
"parentPublication": {
"id": "proceedings/tps-isa/2022/7408/0",
"title": "2022 IEEE 4th International Conference on Trust, Privacy and Security in Intelligent Systems, and Applications (TPS-ISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2022/1015/0/101500a104",
"title": "Metaverse Teaching Overview",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2022/101500a104/1M4rxOG2Buo",
"parentPublication": {
"id": "proceedings/itme/2022/1015/0",
"title": "2022 12th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1LkfvODkBaM",
"title": "2022 4th International Conference on Applied Machine Learning (ICAML)",
"acronym": "icaml",
"groupId": "10056426",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1Lkfy6e2UW4",
"doi": "10.1109/ICAML57167.2022.00089",
"title": "The Current Situation and Prospect of the Development of Metaverse Technology",
"normalizedTitle": "The Current Situation and Prospect of the Development of Metaverse Technology",
"abstract": "Metaverse uses technology such as VR, AR and other technologies to realize the transition from the real world to the virtual world mapping, using the blockchain Lightning Network and NFT unique authentication to decentralize management and authentication of assets in the metaverse. This paper summarizes the development of the metaverse and determines the structural framework of the metaverse, analyzes the relationship and connection between the metaverse and the extended reality (XR) and digital twins, and realizes the advantages of looking forward to the future development of the metaverse based on current technology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Metaverse uses technology such as VR, AR and other technologies to realize the transition from the real world to the virtual world mapping, using the blockchain Lightning Network and NFT unique authentication to decentralize management and authentication of assets in the metaverse. This paper summarizes the development of the metaverse and determines the structural framework of the metaverse, analyzes the relationship and connection between the metaverse and the extended reality (XR) and digital twins, and realizes the advantages of looking forward to the future development of the metaverse based on current technology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Metaverse uses technology such as VR, AR and other technologies to realize the transition from the real world to the virtual world mapping, using the blockchain Lightning Network and NFT unique authentication to decentralize management and authentication of assets in the metaverse. This paper summarizes the development of the metaverse and determines the structural framework of the metaverse, analyzes the relationship and connection between the metaverse and the extended reality (XR) and digital twins, and realizes the advantages of looking forward to the future development of the metaverse based on current technology.",
"fno": "626500a444",
"keywords": [
"Augmented Reality",
"Authorisation",
"Blockchains",
"Digital Twins",
"Asset Authentication",
"Augmented Reality",
"Blockchain Lightning Network",
"Digital Twins",
"Extended Reality",
"Management Decentralization",
"Metaverse Technology",
"NFT Unique Authentication",
"Virtual Reality",
"Virtual World Mapping",
"Metaverse",
"Extended Reality",
"Authentication",
"Lightning",
"Machine Learning",
"Digital Twins",
"Blockchains",
"Metaverse",
"Blockchain",
"NFT",
"Digital Twin"
],
"authors": [
{
"affiliation": "Tibet University,Lasa,China",
"fullName": "Peisi Que",
"givenName": "Peisi",
"surname": "Que",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tibet University,Lasa,China",
"fullName": "Yihong Zeng",
"givenName": "Yihong",
"surname": "Zeng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tibet University,Lasa,China",
"fullName": "Fei Gao",
"givenName": "Fei",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icaml",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6265-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "626500a439",
"articleId": "1LkfyoBZZja",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "626500a449",
"articleId": "1LkfyKDkyXu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acie/2022/7973/0/797300a048",
"title": "Digital Media and VR Art Creation for Metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/acie/2022/797300a048/1FiydByZ4ti",
"parentPublication": {
"id": "proceedings/acie/2022/7973/0",
"title": "2022 2nd Asia Conference on Information Engineering (ACIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/oj/2022/01/09893188",
"title": "Fusion of Building Information Modeling and Blockchain for Metaverse: A Survey",
"doi": null,
"abstractUrl": "/journal/oj/2022/01/09893188/1GGLcptbShO",
"parentPublication": {
"id": "trans/oj",
"title": "IEEE Open Journal of the Computer Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a090",
"title": "Innovation and Prospect of Digital Scene Setting in the Context of Metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a090/1H2phdYgv4I",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcd/2022/6582/0/09900579",
"title": "Metaverse Current Status and Prospects: Focusing on Metaverse Field Cases",
"doi": null,
"abstractUrl": "/proceedings-article/bcd/2022/09900579/1H44caaoIE0",
"parentPublication": {
"id": "proceedings/bcd/2022/6582/0",
"title": "2022 IEEE/ACIS 7th International Conference on Big Data, Cloud Computing, and Data Science (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2022/06/10017431",
"title": "Virtual Dimension—A Primer to Metaverse",
"doi": null,
"abstractUrl": "/magazine/it/2022/06/10017431/1JYZF1FB6ww",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/5555/01/10049293",
"title": "Blockchain Empowered Privacy-Preserving Digital Objects Trading in Metaverse",
"doi": null,
"abstractUrl": "/magazine/mu/5555/01/10049293/1KYok94FH20",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10021004",
"title": "Metaverse in Education: Vision, Opportunities, and Challenges",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10021004/1KfT4Lj5RDi",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2023/5719/0/10074107",
"title": "A privacy awareness framework for NFT avatars in the metaverse",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2023/10074107/1LKwD3J0cvu",
"parentPublication": {
"id": "proceedings/icnc/2023/5719/0",
"title": "2023 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tps-isa/2022/7408/0/740800a039",
"title": "Auditing Metaverse Requires Multimodal Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/tps-isa/2022/740800a039/1Lxf665eb2U",
"parentPublication": {
"id": "proceedings/tps-isa/2022/7408/0",
"title": "2022 IEEE 4th International Conference on Trust, Privacy and Security in Intelligent Systems, and Applications (TPS-ISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icei/2022/9327/0/932700a007",
"title": "Metaverse Applications in Energy Internet",
"doi": null,
"abstractUrl": "/proceedings-article/icei/2022/932700a007/1MhIoEOvaI8",
"parentPublication": {
"id": "proceedings/icei/2022/9327/0",
"title": "2022 IEEE International Conference on Energy Internet (ICEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCxbXAC",
"doi": "10.1109/VR.2017.7892295",
"title": "Gauntlet: Travel technique for immersive environments using non-dominant hand",
"normalizedTitle": "Gauntlet: Travel technique for immersive environments using non-dominant hand",
"abstract": "We present Gauntlet, a travel technique for immersive environments that uses non-dominant hand tracking and a fist gesture to translate and rotate the viewport. The technique allows for simultaneous use of the dominant hand for other spatial input tasks. Applications of Gauntlet include FPS games, and other application domains where navigation should be performed together with other tasks. We release the technique along with an example application, a VR horror game, as an open source project.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present Gauntlet, a travel technique for immersive environments that uses non-dominant hand tracking and a fist gesture to translate and rotate the viewport. The technique allows for simultaneous use of the dominant hand for other spatial input tasks. Applications of Gauntlet include FPS games, and other application domains where navigation should be performed together with other tasks. We release the technique along with an example application, a VR horror game, as an open source project.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present Gauntlet, a travel technique for immersive environments that uses non-dominant hand tracking and a fist gesture to translate and rotate the viewport. The technique allows for simultaneous use of the dominant hand for other spatial input tasks. Applications of Gauntlet include FPS games, and other application domains where navigation should be performed together with other tasks. We release the technique along with an example application, a VR horror game, as an open source project.",
"fno": "07892295",
"keywords": [
"Games",
"Fatigue",
"Navigation",
"Performance Evaluation",
"Tracking",
"Virtual Reality",
"Optical Devices",
"Navigation",
"Travel",
"Virtual Reality",
"Head Mounted Display"
],
"authors": [
{
"affiliation": "Game Research Lab, California State University Monterey Bay, USA",
"fullName": "Mathew Tomberlin",
"givenName": "Mathew",
"surname": "Tomberlin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IDEA Lab, University of Waterloo, Canada",
"fullName": "Liudmila Tahai",
"givenName": "Liudmila",
"surname": "Tahai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Game Research Lab, California State University Monterey Bay, USA",
"fullName": "Krzysztof Pietroszek",
"givenName": "Krzysztof",
"surname": "Pietroszek",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "299-300",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892294",
"articleId": "12OmNwseEVh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892296",
"articleId": "12OmNASILIc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892227",
"title": "Guided head rotation and amplified head rotation: Evaluating semi-natural travel and viewing techniques in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892227/12OmNwseEYz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/earlyaspects/2007/2957/0/29570001",
"title": "A Clustering Technique for Early Detection of Dominant and Recessive Cross-Cutting Concerns",
"doi": null,
"abstractUrl": "/proceedings-article/earlyaspects/2007/29570001/12OmNwt5snR",
"parentPublication": {
"id": "proceedings/earlyaspects/2007/2957/0",
"title": "Early Aspects: Workshop in Aspect-Oriented Requirements Engineering and Architecture Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2010/9046/0/05665865",
"title": "Augmented Viewport: An action at a distance technique for outdoor AR using distant and zoom lens cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2010/05665865/12OmNy3iFk0",
"parentPublication": {
"id": "proceedings/iswc/2010/9046/0",
"title": "International Symposium on Wearable Computers (ISWC) 2010",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/04/ttg2010040690",
"title": "Evaluation of the Cognitive Effects of Travel Technique in Complex Real and Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2010/04/ttg2010040690/13rRUIM2VGZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/06/v0694",
"title": "Comparison of path visualizations and cognitive measures relative to travel technique in a virtual environment",
"doi": null,
"abstractUrl": "/journal/tg/2005/06/v0694/13rRUxYrbUt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hipc/2021/1016/0/101600a448",
"title": "A computational technique for parallel solution of diagonally dominant banded linear systems",
"doi": null,
"abstractUrl": "/proceedings-article/hipc/2021/101600a448/1AqycjTBzva",
"parentPublication": {
"id": "proceedings/hipc/2021/1016/0",
"title": "2021 IEEE 28th International Conference on High Performance Computing, Data, and Analytics (HiPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2022/1332/0/09687247",
"title": "Rethinking Fatigue-Aware 6DoF Video Streaming: Focusing on MPEG Immersive Video",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2022/09687247/1AtQasGiV8s",
"parentPublication": {
"id": "proceedings/icoin/2022/1332/0",
"title": "2022 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a908",
"title": "Heart-In-Hand, swapping point of view for immersive navigation in medical cardiology",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a908/1CJcZCgNWla",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049645",
"title": "GestureSurface: VR Sketching through Assembling Scaffold Surface with Non-Dominant Hand",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049645/1KYoyLX55fy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090498",
"title": "Improving Camera Travel for Immersive Colonography",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090498/1jIxogcnA3K",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1HYuP5tn7Us",
"title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)",
"acronym": "imet",
"groupId": "1847924",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1HYuRB6nAty",
"doi": "10.1109/IMET54801.2022.9929445",
"title": "Explorative Study on Asymmetric Sketch Interactions for Object Retrieval in Virtual Reality",
"normalizedTitle": "Explorative Study on Asymmetric Sketch Interactions for Object Retrieval in Virtual Reality",
"abstract": "Drawing tools for Virtual Reality (VR) enable users to model 3D designs from within the virtual environment itself. These tools employ sketching and sculpting techniques known from desktop-based interfaces and apply them to hand-based controller interaction. While these techniques allow for mid-air sketching of basic shapes, it remains difficult for users to create detailed and comprehensive 3D models. Our work focuses on supporting the user in designing the virtual environment around them by enhancing sketch-based interfaces with a supporting system for interactive model retrieval. An immersed user can query a database containing detailed 3D models and replace them with the virtual environment through sketching. To understand supportive sketching within a virtual environment, we made an explorative comparison between asymmetric methods of sketch interaction, i.e., 3D mid-air sketching, 2D sketching on a virtual tablet, 2D sketching on a fixed virtual whiteboard, and 2D sketching on a real tablet. Our work shows that different patterns emerge when users interact with 3D sketches rather than 2D sketches to compensate for different results from the retrieval system. In particular, the user adopts strategies when drawing on canvas of different sizes or using a physical device instead of a virtual canvas. While we pose our work as a retrieval problem for 3D models of chairs, our results can be extrapolated to other sketching tasks for virtual environments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Drawing tools for Virtual Reality (VR) enable users to model 3D designs from within the virtual environment itself. These tools employ sketching and sculpting techniques known from desktop-based interfaces and apply them to hand-based controller interaction. While these techniques allow for mid-air sketching of basic shapes, it remains difficult for users to create detailed and comprehensive 3D models. Our work focuses on supporting the user in designing the virtual environment around them by enhancing sketch-based interfaces with a supporting system for interactive model retrieval. An immersed user can query a database containing detailed 3D models and replace them with the virtual environment through sketching. To understand supportive sketching within a virtual environment, we made an explorative comparison between asymmetric methods of sketch interaction, i.e., 3D mid-air sketching, 2D sketching on a virtual tablet, 2D sketching on a fixed virtual whiteboard, and 2D sketching on a real tablet. Our work shows that different patterns emerge when users interact with 3D sketches rather than 2D sketches to compensate for different results from the retrieval system. In particular, the user adopts strategies when drawing on canvas of different sizes or using a physical device instead of a virtual canvas. While we pose our work as a retrieval problem for 3D models of chairs, our results can be extrapolated to other sketching tasks for virtual environments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Drawing tools for Virtual Reality (VR) enable users to model 3D designs from within the virtual environment itself. These tools employ sketching and sculpting techniques known from desktop-based interfaces and apply them to hand-based controller interaction. While these techniques allow for mid-air sketching of basic shapes, it remains difficult for users to create detailed and comprehensive 3D models. Our work focuses on supporting the user in designing the virtual environment around them by enhancing sketch-based interfaces with a supporting system for interactive model retrieval. An immersed user can query a database containing detailed 3D models and replace them with the virtual environment through sketching. To understand supportive sketching within a virtual environment, we made an explorative comparison between asymmetric methods of sketch interaction, i.e., 3D mid-air sketching, 2D sketching on a virtual tablet, 2D sketching on a fixed virtual whiteboard, and 2D sketching on a real tablet. Our work shows that different patterns emerge when users interact with 3D sketches rather than 2D sketches to compensate for different results from the retrieval system. In particular, the user adopts strategies when drawing on canvas of different sizes or using a physical device instead of a virtual canvas. While we pose our work as a retrieval problem for 3D models of chairs, our results can be extrapolated to other sketching tasks for virtual environments.",
"fno": "09929445",
"keywords": [
"CAD",
"Solid Modelling",
"User Interfaces",
"Virtual Reality",
"Asymmetric Sketch Interactions",
"Virtual Reality",
"Model 3 D Designs",
"Virtual Environment",
"Desktop Based Interfaces",
"Mid Air Sketching",
"Detailed D Models",
"Comprehensive 3 D Models",
"Sketch Based Interfaces",
"Interactive Model Retrieval",
"Immersed User",
"Detailed 3 D Models",
"Supportive Sketching",
"Sketch Interaction",
"Virtual Tablet",
"Fixed Virtual Whiteboard",
"Users Interact",
"Virtual Canvas",
"Sketching Tasks",
"Solid Modeling",
"Three Dimensional Displays",
"Shape",
"Databases",
"Virtual Environments",
"Media",
"Task Analysis",
"Sketch",
"Virtual Reality",
"CNN",
"HCI"
],
"authors": [
{
"affiliation": "University College London,London,United Kingdom",
"fullName": "Daniele Giunchi",
"givenName": "Daniele",
"surname": "Giunchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Istituto Italiano di Tecnologia,Genoa,Italy",
"fullName": "Stuart James",
"givenName": "Stuart",
"surname": "James",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London,London,United Kingdom",
"fullName": "Riccardo Bovo",
"givenName": "Riccardo",
"surname": "Bovo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "German Research Center for Artificial Intelligence (DFKI)),Saarbrücken,Germany",
"fullName": "Donald Degraen",
"givenName": "Donald",
"surname": "Degraen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University College London,London,United Kingdom",
"fullName": "Anthony Steed",
"givenName": "Anthony",
"surname": "Steed",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "imet",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-7016-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09929765",
"articleId": "1HYuPKvPpSM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09929635",
"articleId": "1HYuUB6Kz4I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccsee/2012/4647/3/4647c265",
"title": "B/S Based 2D Sketch System",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647c265/12OmNBeRtOU",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/3",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2016/4149/0/4149a100",
"title": "A New User-Friendly Sketch-Based Modeling Method Using Convolution Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2016/4149a100/12OmNy4IF9u",
"parentPublication": {
"id": "proceedings/svr/2016/4149/0",
"title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a245",
"title": "3D Sketch System Based on Life-Sized and Operable Concept Enhanced by Three Design Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a245/12OmNzVoBQI",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/10/08421591",
"title": "Model-Guided 3D Sketching",
"doi": null,
"abstractUrl": "/journal/tg/2019/10/08421591/13rRUEgs2Mb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/06/mcg2017060088",
"title": "Sketch-Based Articulated 3D Shape Retrieval",
"doi": null,
"abstractUrl": "/magazine/cg/2017/06/mcg2017060088/13rRUwfqpG7",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a264",
"title": "Sketch/Image-Based 3D Scene Retrieval: Benchmark, Algorithm, Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a264/19wB54xJEuQ",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a383",
"title": "Structure-Aware 3D VR Sketch to 3D Shape Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a383/1KYsqgmUniE",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08964443",
"title": "DeepSketchHair: Deep Sketch-Based 3D Hair Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08964443/1gLZSnCp3Ko",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a826",
"title": "Mid-Air Finger Sketching for Tree Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a826/1tuBbGEUWm4",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a321",
"title": "Simultaneous Real Walking and Asymmetric Input in Virtual Reality with a Smartphone-based Hybrid Interface",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a321/1yeQEyk3fbO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwkhTjj",
"title": "2017 International Conference on High-Performance Computing & Simulation (HPCS)",
"acronym": "hpcs",
"groupId": "1800007",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAlNixP",
"doi": "10.1109/HPCS.2017.124",
"title": "An Efficient Codec for Image Compression Based on Spline Wavelet Transform and Improved SPIHT Algorithm",
"normalizedTitle": "An Efficient Codec for Image Compression Based on Spline Wavelet Transform and Improved SPIHT Algorithm",
"abstract": "This paper presents an efficient codec which is based on an optimal spline wavelet transform and an improved Set Partitioning in Hierarchical Trees algorithm. A comparative study of the proposed codec with the existing works using the polynomial spline based transform and the biorthogonal B9/7 which is frequently used in image compression is done. Peak signal-to-noise ratio (PSNR), structural similarity index measure (SSIM) and encoding time are used for evaluation purpose. The obtained results prove the efficiency and the speed of the proposed codec.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an efficient codec which is based on an optimal spline wavelet transform and an improved Set Partitioning in Hierarchical Trees algorithm. A comparative study of the proposed codec with the existing works using the polynomial spline based transform and the biorthogonal B9/7 which is frequently used in image compression is done. Peak signal-to-noise ratio (PSNR), structural similarity index measure (SSIM) and encoding time are used for evaluation purpose. The obtained results prove the efficiency and the speed of the proposed codec.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an efficient codec which is based on an optimal spline wavelet transform and an improved Set Partitioning in Hierarchical Trees algorithm. A comparative study of the proposed codec with the existing works using the polynomial spline based transform and the biorthogonal B9/7 which is frequently used in image compression is done. Peak signal-to-noise ratio (PSNR), structural similarity index measure (SSIM) and encoding time are used for evaluation purpose. The obtained results prove the efficiency and the speed of the proposed codec.",
"fno": "08035163",
"keywords": [
"Codecs",
"Data Compression",
"Image Coding",
"Polynomials",
"Splines Mathematics",
"Trees Mathematics",
"Wavelet Transforms",
"Efficient Codec",
"Image Compression",
"SPIHT Algorithm",
"Improved Set Partitioning",
"Hierarchical Trees Algorithm",
"Polynomial Spline",
"Structural Similarity Index Measure",
"Spline Wavelet Transform",
"Biorthogonal B 9 7",
"Peak Signal To Noise Ratio",
"Image Coding",
"Wavelet Transforms",
"Splines Mathematics",
"Codecs",
"Binary Trees",
"Encoding",
"Biorthogonal Wavelet Transforms",
"Spline",
"SPIHT",
"PSNR",
"SSIM",
"Encoding Time"
],
"authors": [
{
"affiliation": null,
"fullName": "Rania Boujelbene",
"givenName": "Rania",
"surname": "Boujelbene",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yousra Ben Jemaa",
"givenName": "Yousra Ben",
"surname": "Jemaa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mourad Zribi",
"givenName": "Mourad",
"surname": "Zribi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hpcs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "819-825",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-3250-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08035162",
"articleId": "12OmNywfKDD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08035164",
"articleId": "12OmNBhZ4fz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1995/7310/1/73100422",
"title": "Wavelet transform matched filters for the detection and classification of microcalcifications in mammography",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100422/12OmNBuL1nr",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/paciia/2008/3490/1/3490a360",
"title": "Face Recognition Using Cubic B-Spline Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/paciia/2008/3490a360/12OmNBubOT3",
"parentPublication": {
"id": "proceedings/paciia/2008/3490/1",
"title": "Pacific-Asia Workshop on Computational Intelligence and Industrial Application, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2014/5720/0/5720a005",
"title": "An Approach to Describe Parametric Curves Using Hough-Based Arc Spline Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2014/5720a005/12OmNBv2CeI",
"parentPublication": {
"id": "proceedings/cgiv/2014/5720/0",
"title": "2014 11th International Conference on Computer Graphics, Imaging and Visualization (CGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2006/2746/0/274600011",
"title": "DCT-Based Image Codec Embedded Cubic Spline Interpolation with Optimal Quantization",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2006/274600011/12OmNs0kyBp",
"parentPublication": {
"id": "proceedings/ism/2006/2746/0",
"title": "Eighth IEEE International Symposium on Multimedia (ISM'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945738",
"title": "Toward an optimal B-spline wavelet transform for image compression",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945738/12OmNwAKCKX",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/2/3494b502",
"title": "Detection of QRS Complexes Based on Biorthogonal Spline Wavelet",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b502/12OmNwKoZhr",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2013/4796/0/06781890",
"title": "Image enhancement using E-spline functions",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2013/06781890/12OmNwM6A0m",
"parentPublication": {
"id": "proceedings/isspit/2013/4796/0",
"title": "2013 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745635",
"title": "Full scheme of MPEG4-like codec based on wavelet transform",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745635/12OmNxX3uQr",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031c498",
"title": "Unsymmetrical SPIHT Codec and 1D SPIHT Codec",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031c498/12OmNxZBSBT",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdataservice/2019/0059/0/005900a316",
"title": "A High-Precise Arrhythmia Detection Method Based on Biorthogonal Wavelet and Fully Connected Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/bigdataservice/2019/005900a316/1dDLX5OQI92",
"parentPublication": {
"id": "proceedings/bigdataservice/2019/0059/0",
"title": "2019 IEEE Fifth International Conference on Big Data Computing Service and Applications (BigDataService)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAlvHtF",
"doi": "10.1109/VR.2017.7892229",
"title": "6-DOF VR videos with a single 360-camera",
"normalizedTitle": "6-DOF VR videos with a single 360-camera",
"abstract": "Recent breakthroughs in consumer level virtual reality (VR) headsets are creating a growing user-base in demand for immersive, full 3D VR experiences. While monoscopic 360-videos are perhaps the most prevalent type of content for VR headsets, they lack 3D information and thus cannot be viewed with full 6 degree-of-freedom (DOF). We present an approach that addresses this limitation via a novel warping algorithm that can synthesize new views both with rotational and translational motion of the viewpoint. This enables the ability to perform VR playback of input monoscopic 360-videos files in full stereo with full 6-DOF of head motion. Our method synthesizes novel views for each eye in accordance with the 6-DOF motion of the headset. Our solution tailors standard structure-from-motion and dense reconstruction algorithms to work accurately for 360-videos and is optimized for GPUs to achieve VR frame rates (>120 fps). We demonstrate the effectiveness our approach on a variety of videos with interesting content.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent breakthroughs in consumer level virtual reality (VR) headsets are creating a growing user-base in demand for immersive, full 3D VR experiences. While monoscopic 360-videos are perhaps the most prevalent type of content for VR headsets, they lack 3D information and thus cannot be viewed with full 6 degree-of-freedom (DOF). We present an approach that addresses this limitation via a novel warping algorithm that can synthesize new views both with rotational and translational motion of the viewpoint. This enables the ability to perform VR playback of input monoscopic 360-videos files in full stereo with full 6-DOF of head motion. Our method synthesizes novel views for each eye in accordance with the 6-DOF motion of the headset. Our solution tailors standard structure-from-motion and dense reconstruction algorithms to work accurately for 360-videos and is optimized for GPUs to achieve VR frame rates (>120 fps). We demonstrate the effectiveness our approach on a variety of videos with interesting content.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent breakthroughs in consumer level virtual reality (VR) headsets are creating a growing user-base in demand for immersive, full 3D VR experiences. While monoscopic 360-videos are perhaps the most prevalent type of content for VR headsets, they lack 3D information and thus cannot be viewed with full 6 degree-of-freedom (DOF). We present an approach that addresses this limitation via a novel warping algorithm that can synthesize new views both with rotational and translational motion of the viewpoint. This enables the ability to perform VR playback of input monoscopic 360-videos files in full stereo with full 6-DOF of head motion. Our method synthesizes novel views for each eye in accordance with the 6-DOF motion of the headset. Our solution tailors standard structure-from-motion and dense reconstruction algorithms to work accurately for 360-videos and is optimized for GPUs to achieve VR frame rates (>120 fps). We demonstrate the effectiveness our approach on a variety of videos with interesting content.",
"fno": "07892229",
"keywords": [
"Cameras",
"Three Dimensional Displays",
"Videos",
"Image Reconstruction",
"Headphones",
"Geometry",
"Tracking",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality",
"I 2 10 Artificial Intelligence Vision And Scene Understanding Video Analysis",
"I 4 8 Image Processing And Computer Vision Scene Analysis Motion"
],
"authors": [
{
"affiliation": "Stanford University, Adobe Research, USA",
"fullName": "Jingwei Huang",
"givenName": "Jingwei",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research, USA",
"fullName": "Zhili Chen",
"givenName": "Zhili",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research, USA",
"fullName": "Duygu Ceylan",
"givenName": "Duygu",
"surname": "Ceylan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research, USA",
"fullName": "Hailin Jin",
"givenName": "Hailin",
"surname": "Jin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "37-44",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892228",
"articleId": "12OmNzQhP84",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892230",
"articleId": "12OmNx5GTZ2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2016/4571/0/4571a107",
"title": "Adaptive 360 VR Video Streaming: Divide and Conquer",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a107/12OmNAMtAMS",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b405",
"title": "Stabilizing First Person 360 Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b405/12OmNAWpyow",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2016/4571/0/4571a407",
"title": "Adaptive 360 VR Video Streaming Based on MPEG-DASH SRD",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a407/12OmNx7XH8C",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446523",
"title": "COP: A New Continuous Packing Layout for 360 VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446523/13bd1fKQxs3",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f333",
"title": "Gaze Prediction in Dynamic 360° Immersive Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f333/17D45VW8brT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09779957",
"title": "Casual 6-DoF: free-viewpoint panorama using a handheld 360° camera",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09779957/1DBTD2uB4di",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798261",
"title": "Hybrid Projection For Encoding 360 VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798261/1cJ0Wb1xK4E",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09124686",
"title": "Stimulus Sampling With 360-Videos: Examining Head Movements, Arousal, Presence, Simulator Sickness, and Preference on a Large Sample of Participants and Videos",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09124686/1kVbwGkgqYg",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a371",
"title": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a371/1qpzCZXhpS0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09384236",
"title": "The Potential of 360° Virtual Reality Videos and Real VR for Education—A Literature Review",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09384236/1scDA5NYISI",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBzRNsg",
"title": "2nd International Conference on Machine Learning and Computing (ICMLC 2010)",
"acronym": "icmlc",
"groupId": "1800234",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBlXs4E",
"doi": "10.1109/ICMLC.2010.39",
"title": "Video Coding Technique Using Swarm Intelligence in 3-D Dual Tree Complex Wavelet Transform",
"normalizedTitle": "Video Coding Technique Using Swarm Intelligence in 3-D Dual Tree Complex Wavelet Transform",
"abstract": "Video compression plays an important role in video signal processing, transmission and storage. Since the available bandwidth for transmission is very limited, Multimedia Applications such as video conferencing, video on demand, video telephony and remote sensing are not possible without compression. A lot of video compression techniques have been developed and the video signal transmission has followed at data rates below 64kbps. Wavelet transform based motion compensated video codec performs better compression in order to meet the rate and distortion constraint in video transmission for the available bandwidth than the block based techniques, which are followed in standard video transmissions such as H.261 and H.263. But the efficiency of those technique's depends on the way in which it estimates and compensates the object motions in the video sequence. Wavelet based embedded image coder is quite attractive in modern multimedia applications. Wavelet transform, bit plane coding and other techniques make embedded image coder practical and also provide efficient compression. In this paper, we have proposed a novel video coding using swarm intelligence in dual tree complex wavelet transform for video coding. The 3-D DDWT is an attractive video representation because it isolates motion along different directions in separate subbands. However, it is an over-complete transform with redundancy, which is going to be eliminated by choosing optimal subbands with the help of PSO. The proposed video codec does not require motion compensation and provides better performance than the 3D SPIHT (Embedded type)codec, both objectively and subjectively, and the coder allows full scalability in spatial, temporal and quality dimensions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Video compression plays an important role in video signal processing, transmission and storage. Since the available bandwidth for transmission is very limited, Multimedia Applications such as video conferencing, video on demand, video telephony and remote sensing are not possible without compression. A lot of video compression techniques have been developed and the video signal transmission has followed at data rates below 64kbps. Wavelet transform based motion compensated video codec performs better compression in order to meet the rate and distortion constraint in video transmission for the available bandwidth than the block based techniques, which are followed in standard video transmissions such as H.261 and H.263. But the efficiency of those technique's depends on the way in which it estimates and compensates the object motions in the video sequence. Wavelet based embedded image coder is quite attractive in modern multimedia applications. Wavelet transform, bit plane coding and other techniques make embedded image coder practical and also provide efficient compression. In this paper, we have proposed a novel video coding using swarm intelligence in dual tree complex wavelet transform for video coding. The 3-D DDWT is an attractive video representation because it isolates motion along different directions in separate subbands. However, it is an over-complete transform with redundancy, which is going to be eliminated by choosing optimal subbands with the help of PSO. The proposed video codec does not require motion compensation and provides better performance than the 3D SPIHT (Embedded type)codec, both objectively and subjectively, and the coder allows full scalability in spatial, temporal and quality dimensions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Video compression plays an important role in video signal processing, transmission and storage. Since the available bandwidth for transmission is very limited, Multimedia Applications such as video conferencing, video on demand, video telephony and remote sensing are not possible without compression. A lot of video compression techniques have been developed and the video signal transmission has followed at data rates below 64kbps. Wavelet transform based motion compensated video codec performs better compression in order to meet the rate and distortion constraint in video transmission for the available bandwidth than the block based techniques, which are followed in standard video transmissions such as H.261 and H.263. But the efficiency of those technique's depends on the way in which it estimates and compensates the object motions in the video sequence. Wavelet based embedded image coder is quite attractive in modern multimedia applications. Wavelet transform, bit plane coding and other techniques make embedded image coder practical and also provide efficient compression. In this paper, we have proposed a novel video coding using swarm intelligence in dual tree complex wavelet transform for video coding. The 3-D DDWT is an attractive video representation because it isolates motion along different directions in separate subbands. However, it is an over-complete transform with redundancy, which is going to be eliminated by choosing optimal subbands with the help of PSO. The proposed video codec does not require motion compensation and provides better performance than the 3D SPIHT (Embedded type)codec, both objectively and subjectively, and the coder allows full scalability in spatial, temporal and quality dimensions.",
"fno": "05460747",
"keywords": [
"Motion Compensation",
"Particle Swarm Optimisation",
"Trees Mathematics",
"Video Codecs",
"Video Coding",
"Wavelet Transforms",
"Video Coding Technique",
"Swarm Intelligence",
"3 D Dual Tree Complex Wavelet Transform",
"Video Compression",
"Video Conferencing",
"Video On Demand",
"Video Telephony",
"Remote Sensing",
"Motion Compensated Video Codec",
"H 261 Video Standard",
"H 263 But Video Standard",
"Embedded Image Coder",
"Bit Plane Coding",
"Video Representation",
"PSO",
"Video Coding",
"Particle Swarm Optimization",
"Wavelet Transforms",
"Video Compression",
"Video On Demand",
"Videoconference",
"Bandwidth",
"Video Codecs",
"Image Coding",
"Video Signal Processing",
"DDWT Dualtree Discrete Wavelet Transform",
"NS Noise Shaping",
"DDWTVC Dual Tree Discrete Wavelet Transform With Vector Coding",
"PSO Particle Swarm Optimization"
],
"authors": [
{
"affiliation": null,
"fullName": "M. Thamarai",
"givenName": "M.",
"surname": "Thamarai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "R. Shanmugalakshmi",
"givenName": "R.",
"surname": "Shanmugalakshmi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmlc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-02-01T00:00:00",
"pubType": "proceedings",
"pages": "174-178",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-6006-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05460748",
"articleId": "12OmNvkplg6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05460746",
"articleId": "12OmNBdrubz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acssc/1991/2470/0/00186490",
"title": "A high quality digital HDTV codec",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1991/00186490/12OmNAkEU69",
"parentPublication": {
"id": "proceedings/acssc/1991/2470/0",
"title": "Conference Record of the Twenty-Fifth Asilomar Conference on Signals, Systems & Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcs/2017/3250/0/08035163",
"title": "An Efficient Codec for Image Compression Based on Spline Wavelet Transform and Improved SPIHT Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/hpcs/2017/08035163/12OmNAlNixP",
"parentPublication": {
"id": "proceedings/hpcs/2017/3250/0",
"title": "2017 International Conference on High-Performance Computing & Simulation (HPCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284914",
"title": "Video Coding using 3-D Anisotropic Dual-Tree Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284914/12OmNBqv2p5",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607515",
"title": "Layered screen video coding leveraging hardware video codec",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607515/12OmNBuL154",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745635",
"title": "Full scheme of MPEG4-like codec based on wavelet transform",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745635/12OmNxX3uQr",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2015/6959/0/07069380",
"title": "Real-time CPU based H.265/HEVC encoding solution with x86 platform technology",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2015/07069380/12OmNyKJihB",
"parentPublication": {
"id": "proceedings/icnc/2015/6959/0",
"title": "2015 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g660",
"title": "Extending Neural P-frame Codecs for B-frame Coding",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g660/1BmGwYMY9y0",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049696",
"title": "Wavelet-Based Fast Decoding of 360° Videos",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049696/1KYoz753Sxi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1997/7761/0/00582071",
"title": "Video compression with weighted finite automata",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1997/00582071/1dUnbHjkxNK",
"parentPublication": {
"id": "proceedings/dcc/1997/7761/0",
"title": "Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428224",
"title": "Learned Image Coding for Machines: A Content-Adaptive Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428224/1uim3hMR6gg",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxE2mTG",
"title": "2007 International Conference on Multimedia & Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBqv2p5",
"doi": "10.1109/ICME.2007.4284914",
"title": "Video Coding using 3-D Anisotropic Dual-Tree Wavelet Transform",
"normalizedTitle": "Video Coding using 3-D Anisotropic Dual-Tree Wavelet Transform",
"abstract": "This paper investigates the use of the anisotropic 3-D dual-tree discrete wavelet transform (DDWT) for video coding. The 3-D DDWT is an attractive video representation because it isolates image patterns with different spatial orientations and motion directions and speeds in separate subbands. Our previous codecs using the 3-D isotropic DDWT provides better performance than the 3-D SPIHT codec on the 3-D DWT. In this paper, we explore the use of anisotropic DDWT (ADDWT) for video coding. The ADDWT extends the superiority of the normal DDWT with more directional subbands without adding to the redundancy. The proposed codec applies SPIHT to each of the ADDWT trees. This codec provides significantly better performance than the 3-D SPIHT codec using the standard DWT and the DDWT both objectively and subjectively. None of these video codecs requires motion compensation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper investigates the use of the anisotropic 3-D dual-tree discrete wavelet transform (DDWT) for video coding. The 3-D DDWT is an attractive video representation because it isolates image patterns with different spatial orientations and motion directions and speeds in separate subbands. Our previous codecs using the 3-D isotropic DDWT provides better performance than the 3-D SPIHT codec on the 3-D DWT. In this paper, we explore the use of anisotropic DDWT (ADDWT) for video coding. The ADDWT extends the superiority of the normal DDWT with more directional subbands without adding to the redundancy. The proposed codec applies SPIHT to each of the ADDWT trees. This codec provides significantly better performance than the 3-D SPIHT codec using the standard DWT and the DDWT both objectively and subjectively. None of these video codecs requires motion compensation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper investigates the use of the anisotropic 3-D dual-tree discrete wavelet transform (DDWT) for video coding. The 3-D DDWT is an attractive video representation because it isolates image patterns with different spatial orientations and motion directions and speeds in separate subbands. Our previous codecs using the 3-D isotropic DDWT provides better performance than the 3-D SPIHT codec on the 3-D DWT. In this paper, we explore the use of anisotropic DDWT (ADDWT) for video coding. The ADDWT extends the superiority of the normal DDWT with more directional subbands without adding to the redundancy. The proposed codec applies SPIHT to each of the ADDWT trees. This codec provides significantly better performance than the 3-D SPIHT codec using the standard DWT and the DDWT both objectively and subjectively. None of these video codecs requires motion compensation.",
"fno": "04284914",
"keywords": [
"Discrete Wavelet Transforms",
"Transform Coding",
"Video Codecs",
"Video Coding",
"3 D Anisotropic Dual Tree Wavelet Transform",
"Discrete Wavelet Transform",
"Image Patterns",
"Video Codecs",
"Motion Compensation",
"Video Coding",
"Anisotropic Magnetoresistance",
"Wavelet Transforms",
"Discrete Wavelet Transforms",
"Noise Shaping",
"Video Codecs",
"Frequency",
"Arithmetic",
"Code Standards",
"Spatial Resolution"
],
"authors": [
{
"affiliation": "Department of Automation, Tsinghua Universty, Beijing, China",
"fullName": "Jingyu Yang",
"givenName": "Jingyu",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Electrical and Computer Engineering Dept, Polytechnic Univeristy, Brooklyn, NY, USA",
"fullName": "Beibei Wang",
"givenName": "Beibei",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Electrical and Computer Engineering Dept, Polytechnic Univeristy, Brooklyn, NY, USA",
"fullName": "Yao Wang",
"givenName": "Yao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Automation, Tsinghua Universty, Beijing, China",
"fullName": "Wenli Xu",
"givenName": "Wenli",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-07-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2007",
"issn": "1945-7871",
"isbn": "1-4244-1016-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04284913",
"articleId": "12OmNs59K1w",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04284915",
"articleId": "12OmNvA1hcw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icime/2009/3595/0/3595a090",
"title": "Muti-focus Image Fusion Using Wavelet Based Contourlet Transform and Region",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2009/3595a090/12OmNB8CiVI",
"parentPublication": {
"id": "proceedings/icime/2009/3595/0",
"title": "Information Management and Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isme/2010/4132/1/4132a559",
"title": "Images Compression Using Dual Tree Complex Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/isme/2010/4132a559/12OmNBA9oBL",
"parentPublication": {
"id": "isme/2010/4132/1",
"title": "Information Science and Management Engineering, International Conference of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2008/3358/0/3358a147",
"title": "Three-Dimensional Transforms and Entropy Coders for a Fast Embedded Color Video Codec",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2008/3358a147/12OmNBTawqS",
"parentPublication": {
"id": "proceedings/sibgrapi/2008/3358/0",
"title": "2008 XXI Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmlc/2010/6006/0/05460747",
"title": "Video Coding Technique Using Swarm Intelligence in 3-D Dual Tree Complex Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2010/05460747/12OmNBlXs4E",
"parentPublication": {
"id": "proceedings/icmlc/2010/6006/0",
"title": "2nd International Conference on Machine Learning and Computing (ICMLC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761211",
"title": "Face recognition using anisotropic dual-tree complex wavelet packets",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761211/12OmNCcKQeA",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745392",
"title": "A new 3-D subband video coding technique",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745392/12OmNqI04RE",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a225",
"title": "Multiview Video Coding Based on Wavelet Pyramids",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a225/12OmNwswg09",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/3/05745336",
"title": "VLSI architecture for a new real-time 3D wavelet transform",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745336/12OmNxcMSbv",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/3",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/act/2009/3915/0/05376505",
"title": "Wavelet Based Image Compression: A Comparative Study",
"doi": null,
"abstractUrl": "/proceedings-article/act/2009/05376505/13bd1gQYgDO",
"parentPublication": {
"id": "proceedings/act/2009/3915/0",
"title": "Advances in Computing, Control, and Telecommunication Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049696",
"title": "Wavelet-Based Fast Decoding of 360° Videos",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049696/1KYoz753Sxi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBbaH9O",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"acronym": "ism",
"groupId": "1001094",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvT2pdg",
"doi": "10.1109/ISM.2017.12",
"title": "Multi-generation-robust Coding with JPEG XS",
"normalizedTitle": "Multi-generation-robust Coding with JPEG XS",
"abstract": "The JPEG committee (formally, ISO SC29 WG1) is currently standardizing a lightweight mezzanine codec for video over IP transport under the name JPEG XS. A particular challenging design constraint of this codec is multi-generation robustness, that is the necessity to minimize the error built-up under multiple re-compression cycles. In this paper, we discuss the sources of such errors, how they are avoided in the JPEG XS design and compare the multi-generation robustness of JPEG XS with that of other codecs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The JPEG committee (formally, ISO SC29 WG1) is currently standardizing a lightweight mezzanine codec for video over IP transport under the name JPEG XS. A particular challenging design constraint of this codec is multi-generation robustness, that is the necessity to minimize the error built-up under multiple re-compression cycles. In this paper, we discuss the sources of such errors, how they are avoided in the JPEG XS design and compare the multi-generation robustness of JPEG XS with that of other codecs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The JPEG committee (formally, ISO SC29 WG1) is currently standardizing a lightweight mezzanine codec for video over IP transport under the name JPEG XS. A particular challenging design constraint of this codec is multi-generation robustness, that is the necessity to minimize the error built-up under multiple re-compression cycles. In this paper, we discuss the sources of such errors, how they are avoided in the JPEG XS design and compare the multi-generation robustness of JPEG XS with that of other codecs.",
"fno": "2937a006",
"keywords": [
"Transform Coding",
"Quantization Signal",
"Codecs",
"Wavelet Transforms",
"Image Reconstruction",
"Decoding",
"Encoding",
"Video Over IP",
"Mezzanine Codec",
"JPEG XS",
"Multi Generation Error",
"Multi Generation Robustness"
],
"authors": [
{
"affiliation": null,
"fullName": "Thomas Richter",
"givenName": "Thomas",
"surname": "Richter",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Joachim Keinert",
"givenName": "Joachim",
"surname": "Keinert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Antonin Descampe",
"givenName": "Antonin",
"surname": "Descampe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gael Rouvroy",
"givenName": "Gael",
"surname": "Rouvroy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alexandre Willeme",
"givenName": "Alexandre",
"surname": "Willeme",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ism",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-12-01T00:00:00",
"pubType": "proceedings",
"pages": "6-13",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2937-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2937a001",
"articleId": "12OmNCbCrSX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2937a014",
"articleId": "12OmNqGA5iK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dcc/2017/6721/0/07921907",
"title": "Error Bounds for HDR Image Coding with JPEG XT",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2017/07921907/12OmNrJROZL",
"parentPublication": {
"id": "proceedings/dcc/2017/6721/0",
"title": "2017 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2006/2746/0/274600011",
"title": "DCT-Based Image Codec Embedded Cubic Spline Interpolation with Optimal Quantization",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2006/274600011/12OmNs0kyBp",
"parentPublication": {
"id": "proceedings/ism/2006/2746/0",
"title": "Eighth IEEE International Symposium on Multimedia (ISM'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012254",
"title": "Binary tree decomposition depth coding for 3D video applications",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012254/12OmNxVlTFn",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2004/8689/0/01433700",
"title": "Multicomponent transforms for motion JPEG 2000 applications",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2004/01433700/12OmNxWuiuL",
"parentPublication": {
"id": "proceedings/isspit/2004/8689/0",
"title": "Proceedings of the Fourth IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1991/9202/0/00213331",
"title": "Effect of coefficient coding on JPEG baseline image compression",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1991/00213331/12OmNynJMVM",
"parentPublication": {
"id": "proceedings/dcc/1991/9202/0",
"title": "1991 Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdp/2015/8491/0/8491a001",
"title": "Heterogeneous Acceleration of Volumetric JPEG 2000",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/2015/8491a001/12OmNzX6cpT",
"parentPublication": {
"id": "proceedings/pdp/2015/8491/0",
"title": "2015 23rd Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786205",
"title": "Single-Loop Software Architecture for JPEG 2000",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786205/12OmNzlUKow",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a077",
"title": "Comparing CNNs and JPEG for Real-Time Multi-view Streaming in Tele-Immersive Scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a077/19RStGy0Zgc",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150597",
"title": "Adapting JPEG XS gains and priorities to tasks and contents",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150597/1lPHz3F7feE",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09453114",
"title": "JPEG Robust Invertible Grayscale",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09453114/1ulCAbr1xpC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBp52xL",
"title": "Genetic and Evolutionary Computing, International Conference on",
"acronym": "icgec",
"groupId": "1800291",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNviHK5M",
"doi": "10.1109/ICGEC.2010.156",
"title": "Fast Macro-block Selection Algorithm Using 2-D Haar Wavelet Features for H.264 Video Codec",
"normalizedTitle": "Fast Macro-block Selection Algorithm Using 2-D Haar Wavelet Features for H.264 Video Codec",
"abstract": "This paper proposes a fast macro block selection algorithm using Haar wavelet features for H.264/AVC video codec system. The proposed algorithm makes use of the two-dimensional Haar wavelet transform to estimate the sub-band energy of each macro-block in a given video frame. Then the sub-band energy can be applied to the macro-block mode decision as a primary parameter. To prevent the error segmentation, the proposed algorithm uses the rate-distortion cost (RDcost) mechanism in the mode decision procedure. Various experimental results show that the proposed algorithm can effectively make a macro-block mode decision under an RDcost value. Furthermore, the execution time of the proposed wavelet-based macro-block selection algorithm is faster 2~4 times than that of the high complexity mode algorithm with the similar macro-block mode decision results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a fast macro block selection algorithm using Haar wavelet features for H.264/AVC video codec system. The proposed algorithm makes use of the two-dimensional Haar wavelet transform to estimate the sub-band energy of each macro-block in a given video frame. Then the sub-band energy can be applied to the macro-block mode decision as a primary parameter. To prevent the error segmentation, the proposed algorithm uses the rate-distortion cost (RDcost) mechanism in the mode decision procedure. Various experimental results show that the proposed algorithm can effectively make a macro-block mode decision under an RDcost value. Furthermore, the execution time of the proposed wavelet-based macro-block selection algorithm is faster 2~4 times than that of the high complexity mode algorithm with the similar macro-block mode decision results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a fast macro block selection algorithm using Haar wavelet features for H.264/AVC video codec system. The proposed algorithm makes use of the two-dimensional Haar wavelet transform to estimate the sub-band energy of each macro-block in a given video frame. Then the sub-band energy can be applied to the macro-block mode decision as a primary parameter. To prevent the error segmentation, the proposed algorithm uses the rate-distortion cost (RDcost) mechanism in the mode decision procedure. Various experimental results show that the proposed algorithm can effectively make a macro-block mode decision under an RDcost value. Furthermore, the execution time of the proposed wavelet-based macro-block selection algorithm is faster 2~4 times than that of the high complexity mode algorithm with the similar macro-block mode decision results.",
"fno": "4281a610",
"keywords": [
"H 264 Video Codec",
"Macro Block Selection",
"Wavelet Transform"
],
"authors": [
{
"affiliation": null,
"fullName": "Chung-Hsien Chang",
"givenName": "Chung-Hsien",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shi-Huang Chen",
"givenName": "Shi-Huang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jhing-Fa Wang",
"givenName": "Jhing-Fa",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icgec",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-12-01T00:00:00",
"pubType": "proceedings",
"pages": "610-613",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4281-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4281a606",
"articleId": "12OmNCfjepc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4281a614",
"articleId": "12OmNwLOYTB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icinis/2010/4249/0/4249a576",
"title": "A Novel Inter-frame Mode Decision Prediction Algorithm for H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a576/12OmNApcuB4",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2011/4588/0/4588a645",
"title": "Efficient Block Mode Decision and Prediction Mode Selection for Intra Prediction in H.264/AVC High Profile",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2011/4588a645/12OmNrYCXMn",
"parentPublication": {
"id": "proceedings/dicta/2011/4588/0",
"title": "2011 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/psivt/2010/4285/0/4285a247",
"title": "Zero-block Mode Decision Algorithm for High Bit-Rate Coding in H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/psivt/2010/4285a247/12OmNvnwVr2",
"parentPublication": {
"id": "proceedings/psivt/2010/4285/0",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a494",
"title": "An Improved Encoding Algorithm for H.264/AVC Based on the Character of Macro-block",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a494/12OmNwD1pUB",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2010/4222/0/4222a647",
"title": "A Fast Inter-frame Prediction Algorithm for H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2010/4222a647/12OmNwJgALG",
"parentPublication": {
"id": "proceedings/iih-msp/2010/4222/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a509",
"title": "System Design of Perceptual Quality-Regulable H.264 Video Encoder",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a509/12OmNwM6A0j",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2010/4222/0/4222a651",
"title": "A Self-Adaptive and Fast Motion Estimation Search Method for H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2010/4222a651/12OmNx4Q6L1",
"parentPublication": {
"id": "proceedings/iih-msp/2010/4222/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2011/4455/0/4455a535",
"title": "Fast Mode Decision Based on All-Zero Block in H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2011/4455a535/12OmNxZkhvY",
"parentPublication": {
"id": "proceedings/icdma/2011/4455/0",
"title": "2011 Second International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a490",
"title": "A Fast Intra-frame Prediction Algorithm Based on the Feature of Macro-block for H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a490/12OmNy2rRYu",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2010/4222/0/4222a664",
"title": "A Novel Intra-frame Prediction Algorithm Based on Macro-block's Histogram for H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2010/4222a664/12OmNzVXNKU",
"parentPublication": {
"id": "proceedings/iih-msp/2010/4222/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwD1pTz",
"title": "Image Processing, International Conference on",
"acronym": "icip",
"groupId": "1000349",
"volume": "1",
"displayVolume": "1",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwvVrxR",
"doi": "10.1109/ICIP.1995.531437",
"title": "A wavelet video coder using entropy-constrained trellis coded quantization",
"normalizedTitle": "A wavelet video coder using entropy-constrained trellis coded quantization",
"abstract": "A new wavelet video coder is proposed which uses entropy-constrained trellis-coded quantization to quantize the inter-frame prediction errors of the decomposed subband data. The video sequence is first spatially decomposed into several subbands and motion compensated in the frequency domain. A simple switching mechanism is used to decide between inter-frame and intra-frame coding on a macro-block basis. Symmetric codebooks have been designed for the quantizers assuming that the subband signals are approximated by a generalized Gaussian source. The output rate is controlled by a feedback quantization control mechanism, leading to a fixed-rate system. Simulation results reported for the \"Miss America\" sequence, indicate that the proposed codec has a very good rate-distortion performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A new wavelet video coder is proposed which uses entropy-constrained trellis-coded quantization to quantize the inter-frame prediction errors of the decomposed subband data. The video sequence is first spatially decomposed into several subbands and motion compensated in the frequency domain. A simple switching mechanism is used to decide between inter-frame and intra-frame coding on a macro-block basis. Symmetric codebooks have been designed for the quantizers assuming that the subband signals are approximated by a generalized Gaussian source. The output rate is controlled by a feedback quantization control mechanism, leading to a fixed-rate system. Simulation results reported for the \"Miss America\" sequence, indicate that the proposed codec has a very good rate-distortion performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A new wavelet video coder is proposed which uses entropy-constrained trellis-coded quantization to quantize the inter-frame prediction errors of the decomposed subband data. The video sequence is first spatially decomposed into several subbands and motion compensated in the frequency domain. A simple switching mechanism is used to decide between inter-frame and intra-frame coding on a macro-block basis. Symmetric codebooks have been designed for the quantizers assuming that the subband signals are approximated by a generalized Gaussian source. The output rate is controlled by a feedback quantization control mechanism, leading to a fixed-rate system. Simulation results reported for the \"Miss America\" sequence, indicate that the proposed codec has a very good rate-distortion performance.",
"fno": "73100598",
"keywords": [
"Video Codecs Video Coding Entropy Codes Quantisation Signal Wavelet Transforms Trellis Codes Prediction Theory Image Sequences Motion Compensation Gaussian Processes Rate Distortion Theory Wavelet Video Coder Entropy Constrained Trellis Coded Quantization Interframe Prediction Errors Subband Data Video Sequence Motion Compensation Frequency Domain Switching Mechanism Interframe Coding Intraframe Coding Macroblock Symmetric Codebooks Subband Signals Generalized Gaussian Source Output Rate Feedback Quantization Control Fixed Rate System Simulation Results Image Sequence Codec Rate Distortion Performance"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng., Maryland Univ., College Park, MD, USA",
"fullName": "H. Ito",
"givenName": "H.",
"surname": "Ito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Maryland Univ., College Park, MD, USA",
"fullName": "N. Farvardin",
"givenName": "N.",
"surname": "Farvardin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icip",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-10-01T00:00:00",
"pubType": "proceedings",
"pages": "598",
"year": "1995",
"issn": null,
"isbn": "0-8186-7310-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "73100594",
"articleId": "12OmNrkT7Ee",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "73100602",
"articleId": "12OmNxHryev",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAWH9tO",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"acronym": "icassp",
"groupId": "1000002",
"volume": "4",
"displayVolume": "4",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxX3uQr",
"doi": "10.1109/ICASSP.2002.5745635",
"title": "Full scheme of MPEG4-like codec based on wavelet transform",
"normalizedTitle": "Full scheme of MPEG4-like codec based on wavelet transform",
"abstract": "We propose a full scheme of video codec with enhanced features, which are very useful for multimedia applications. Our codec supports video object based compression: the encoder automatically detects and tracks moving video objects with minor human interactivity. Each object is represented in constrained Delaunay mesh structure and encoded in motion-compensation manner to reduce the needed bit-rate for transmission. By transforming the residual errors (used as correction-values after motion-estimation) into wavelet domain followed by quantising phase, we not only reduce the bandwidth to even lower limit but also guarantee the proper quality (up to the possibly maximum deployed level) for various end-users with a bitstream possessing a famous property - the scalability - regardless the speed of the user-access to the communication network. Simulation is also implemented to demonstrate these virtues.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a full scheme of video codec with enhanced features, which are very useful for multimedia applications. Our codec supports video object based compression: the encoder automatically detects and tracks moving video objects with minor human interactivity. Each object is represented in constrained Delaunay mesh structure and encoded in motion-compensation manner to reduce the needed bit-rate for transmission. By transforming the residual errors (used as correction-values after motion-estimation) into wavelet domain followed by quantising phase, we not only reduce the bandwidth to even lower limit but also guarantee the proper quality (up to the possibly maximum deployed level) for various end-users with a bitstream possessing a famous property - the scalability - regardless the speed of the user-access to the communication network. Simulation is also implemented to demonstrate these virtues.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a full scheme of video codec with enhanced features, which are very useful for multimedia applications. Our codec supports video object based compression: the encoder automatically detects and tracks moving video objects with minor human interactivity. Each object is represented in constrained Delaunay mesh structure and encoded in motion-compensation manner to reduce the needed bit-rate for transmission. By transforming the residual errors (used as correction-values after motion-estimation) into wavelet domain followed by quantising phase, we not only reduce the bandwidth to even lower limit but also guarantee the proper quality (up to the possibly maximum deployed level) for various end-users with a bitstream possessing a famous property - the scalability - regardless the speed of the user-access to the communication network. Simulation is also implemented to demonstrate these virtues.",
"fno": "05745635",
"keywords": [
"Watermarking",
"Robustness",
"Cryptography",
"Wavelet Transforms",
"Codecs",
"Transform Coding"
],
"authors": [
{
"affiliation": "Technical University of Budapest, Hungary",
"fullName": "Son Tran Minh",
"givenName": "Son",
"surname": "Tran Minh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Budapest, Hungary",
"fullName": "Kalman Fazekas",
"givenName": "Kalman",
"surname": "Fazekas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Université, Bordeaux-I, France",
"fullName": "Jenny Benois-Pineau",
"givenName": "Jenny",
"surname": "Benois-Pineau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Budapest, Hungary",
"fullName": "Adras Gschwindt",
"givenName": "Adras",
"surname": "Gschwindt",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icassp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-05-01T00:00:00",
"pubType": "proceedings",
"pages": "IV-4177-IV-4177",
"year": "2002",
"issn": "1520-6149",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05745634",
"articleId": "12OmNzmclm5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05745636",
"articleId": "12OmNAT0mQW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acssc/1991/2470/0/00186490",
"title": "A high quality digital HDTV codec",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1991/00186490/12OmNAkEU69",
"parentPublication": {
"id": "proceedings/acssc/1991/2470/0",
"title": "Conference Record of the Twenty-Fifth Asilomar Conference on Signals, Systems & Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcs/2017/3250/0/08035163",
"title": "An Efficient Codec for Image Compression Based on Spline Wavelet Transform and Improved SPIHT Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/hpcs/2017/08035163/12OmNAlNixP",
"parentPublication": {
"id": "proceedings/hpcs/2017/3250/0",
"title": "2017 International Conference on High-Performance Computing & Simulation (HPCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmlc/2010/6006/0/05460747",
"title": "Video Coding Technique Using Swarm Intelligence in 3-D Dual Tree Complex Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2010/05460747/12OmNBlXs4E",
"parentPublication": {
"id": "proceedings/icmlc/2010/6006/0",
"title": "2nd International Conference on Machine Learning and Computing (ICMLC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284914",
"title": "Video Coding using 3-D Anisotropic Dual-Tree Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284914/12OmNBqv2p5",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284607",
"title": "A Transform Domain Classification Based Wyner-Ziv Video Codec",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284607/12OmNvkpl3f",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2004/8689/0/01433700",
"title": "Multicomponent transforms for motion JPEG 2000 applications",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2004/01433700/12OmNxWuiuL",
"parentPublication": {
"id": "proceedings/isspit/2004/8689/0",
"title": "Proceedings of the Fourth IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745674",
"title": "A very low bit rate video codec with Smart Error Resiliency features for robust wireless video transmission",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745674/12OmNxwncxV",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2013/0602/0/06579384",
"title": "Reversible Data Hiding Scheme for the H.264/AVC Codec",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579384/12OmNyugyBM",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dese/2016/5487/0/07930644",
"title": "HEVC Based Multi-view Video Codec Using Frame Interleaving Technique",
"doi": null,
"abstractUrl": "/proceedings-article/dese/2016/07930644/12OmNzBOhvc",
"parentPublication": {
"id": "proceedings/dese/2016/5487/0",
"title": "2016 9th International Conference on Developments in eSystems Engineering (DeSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284757",
"title": "A High Performance Motion Mode Adaptive Lifting Motion Compensation Wavelet Video Codec",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284757/12OmNzDehe7",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKiru",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WgziOF",
"doi": "10.1109/CVPRW.2018.00114",
"title": "DPW-SDNet: Dual Pixel-Wavelet Domain Deep CNNs for Soft Decoding of JPEG-Compressed Images",
"normalizedTitle": "DPW-SDNet: Dual Pixel-Wavelet Domain Deep CNNs for Soft Decoding of JPEG-Compressed Images",
"abstract": "JPEG is one of the widely used lossy compression methods. JPEG-compressed images usually suffer from compression artifacts including blocking and blurring, especially at low bit-rates. Soft decoding is an effective solution to improve the quality of compressed images without changing codec or introducing extra coding bits. Inspired by the excellent performance of the deep convolutional neural networks (CNNs) on both low-level and high-level computer vision problems, we develop a dual pixel-wavelet domain deep CNNs-based soft decoding network for JPEG-compressed images, namely DPW-SDNet. The pixel domain deep network takes the four downsampled versions of the compressed image to form a 4-channel input and outputs a pixel domain prediction, while the wavelet domain deep network uses the 1-level discrete wavelet transformation (DWT) coefficients to form a 4-channel input to produce a DWT domain prediction. The pixel domain and wavelet domain estimates are combined to generate the final soft decoded result. Experimental results demonstrate the superiority of the proposed DPW-SDNet over several state-of-the-art compression artifacts reduction algorithms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "JPEG is one of the widely used lossy compression methods. JPEG-compressed images usually suffer from compression artifacts including blocking and blurring, especially at low bit-rates. Soft decoding is an effective solution to improve the quality of compressed images without changing codec or introducing extra coding bits. Inspired by the excellent performance of the deep convolutional neural networks (CNNs) on both low-level and high-level computer vision problems, we develop a dual pixel-wavelet domain deep CNNs-based soft decoding network for JPEG-compressed images, namely DPW-SDNet. The pixel domain deep network takes the four downsampled versions of the compressed image to form a 4-channel input and outputs a pixel domain prediction, while the wavelet domain deep network uses the 1-level discrete wavelet transformation (DWT) coefficients to form a 4-channel input to produce a DWT domain prediction. The pixel domain and wavelet domain estimates are combined to generate the final soft decoded result. Experimental results demonstrate the superiority of the proposed DPW-SDNet over several state-of-the-art compression artifacts reduction algorithms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "JPEG is one of the widely used lossy compression methods. JPEG-compressed images usually suffer from compression artifacts including blocking and blurring, especially at low bit-rates. Soft decoding is an effective solution to improve the quality of compressed images without changing codec or introducing extra coding bits. Inspired by the excellent performance of the deep convolutional neural networks (CNNs) on both low-level and high-level computer vision problems, we develop a dual pixel-wavelet domain deep CNNs-based soft decoding network for JPEG-compressed images, namely DPW-SDNet. The pixel domain deep network takes the four downsampled versions of the compressed image to form a 4-channel input and outputs a pixel domain prediction, while the wavelet domain deep network uses the 1-level discrete wavelet transformation (DWT) coefficients to form a 4-channel input to produce a DWT domain prediction. The pixel domain and wavelet domain estimates are combined to generate the final soft decoded result. Experimental results demonstrate the superiority of the proposed DPW-SDNet over several state-of-the-art compression artifacts reduction algorithms.",
"fno": "610000a824",
"keywords": [
"Computer Vision",
"Data Compression",
"Decoding",
"Discrete Wavelet Transforms",
"Image Coding",
"Neural Nets",
"Wavelet Transforms",
"Wavelet Domain Estimates",
"Final Soft Decoded Result",
"DPW SD Net",
"State Of The Art Compression Artifacts Reduction Algorithms",
"JPEG Compressed Images",
"Low Bit Rates",
"Compressed Image",
"Codec",
"Deep Convolutional Neural Networks",
"Low Level",
"High Level Computer Vision Problems",
"Dual Pixel Wavelet Domain Deep CN Ns Based Soft Decoding Network",
"Pixel Domain Deep Network",
"4 Channel Input",
"Pixel Domain Prediction",
"Wavelet Domain Deep Network",
"1 Level Discrete Wavelet Transformation",
"DWT Domain Prediction",
"Lossy Compression Methods",
"Image Coding",
"Wavelet Domain",
"Decoding",
"Image Restoration",
"Training",
"Discrete Wavelet Transforms"
],
"authors": [
{
"affiliation": null,
"fullName": "Honggang Chen",
"givenName": "Honggang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaohai He",
"givenName": "Xiaohai",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Linbo Qing",
"givenName": "Linbo",
"surname": "Qing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuhua Xiong",
"givenName": "Shuhua",
"surname": "Xiong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Truong Q. Nguyen",
"givenName": "Truong Q.",
"surname": "Nguyen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "824-82409",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6100-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "610000a814",
"articleId": "17D45VTRovN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "610000a834",
"articleId": "17D45WYQJ9o",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2012/4875/0/4875a210",
"title": "JIRL - A C++ Library for JPEG Compressed Domain Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2012/4875a210/12OmNAiFIaw",
"parentPublication": {
"id": "proceedings/ism/2012/4875/0",
"title": "2012 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a026",
"title": "Fast JPEG Image Retrieval Based on AC Huffman Tables",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a026/12OmNBp52zc",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccima/1999/0300/0/03000285",
"title": "Visual Image Retrieval on Compressed Domain with Q-Distance",
"doi": null,
"abstractUrl": "/proceedings-article/iccima/1999/03000285/12OmNvStczX",
"parentPublication": {
"id": "proceedings/iccima/1999/0300/0",
"title": "Computational Intelligence and Multimedia Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2017/0612/0/0612a022",
"title": "Fast Compressed Domain JPEG Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2017/0612a022/12OmNxGAKX5",
"parentPublication": {
"id": "proceedings/icvisp/2017/0612/0",
"title": "2017 International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2012/4875/0/4875a485",
"title": "Exploiting JPEG Compression for Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2012/4875a485/12OmNyKa5Yj",
"parentPublication": {
"id": "proceedings/ism/2012/4875/0",
"title": "2012 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdcat/2005/2405/0/24051058",
"title": "Toward Blind Logo Watermarking in JPEG-Compressed Images",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2005/24051058/12OmNyRPgWL",
"parentPublication": {
"id": "proceedings/pdcat/2005/2405/0",
"title": "Sixth International Conference on Parallel and Distributed Computing Applications and Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2015/7143/0/7143a240",
"title": "Based on the Characteristics of the JPEG Image Retrieval System Research",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2015/7143a240/12OmNyvGykP",
"parentPublication": {
"id": "proceedings/icmtma/2015/7143/0",
"title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851c764",
"title": "D3: Deep Dual-Domain Based Fast Restoration of JPEG-Compressed Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851c764/12OmNzDNtvx",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/2/81832334",
"title": "Processing JPEG-compressed images",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81832334/12OmNzwHvn9",
"parentPublication": {
"id": "proceedings/icip/1997/8183/2",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07299153",
"title": "Data-driven sparsity-based restoration of JPEG-compressed images in dual transform-pixel domain",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07299153/12OmNzwpUjf",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy4IF3s",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzVoBD5",
"doi": "10.1109/ICALT.2018.00094",
"title": "Virtual Reality Learning Environments for Vocational Education: A Comparison Study with Conventional Instructional Media on Knowledge Retention",
"normalizedTitle": "Virtual Reality Learning Environments for Vocational Education: A Comparison Study with Conventional Instructional Media on Knowledge Retention",
"abstract": "Immersive learning environments are increasingly being adopted as modern alternative presentation modes in training scenarios. Presently, the improved levels of engagement and interactions offered by these systems are expected to potentially motivate the learners more than conventional 2D interfaces. This study compares the effectiveness of immersive environments with conventional 2D learning content for a motorcycle labeling task by evaluating knowledge retention and recall. The study was conducted with two groups of participants: a VR interaction group and a tablet based 2D interaction group. The enhanced spatial interaction capability of VR was hypothesized to promote knowledge retention and improve the instructional utility of immersive learning solutions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive learning environments are increasingly being adopted as modern alternative presentation modes in training scenarios. Presently, the improved levels of engagement and interactions offered by these systems are expected to potentially motivate the learners more than conventional 2D interfaces. This study compares the effectiveness of immersive environments with conventional 2D learning content for a motorcycle labeling task by evaluating knowledge retention and recall. The study was conducted with two groups of participants: a VR interaction group and a tablet based 2D interaction group. The enhanced spatial interaction capability of VR was hypothesized to promote knowledge retention and improve the instructional utility of immersive learning solutions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive learning environments are increasingly being adopted as modern alternative presentation modes in training scenarios. Presently, the improved levels of engagement and interactions offered by these systems are expected to potentially motivate the learners more than conventional 2D interfaces. This study compares the effectiveness of immersive environments with conventional 2D learning content for a motorcycle labeling task by evaluating knowledge retention and recall. The study was conducted with two groups of participants: a VR interaction group and a tablet based 2D interaction group. The enhanced spatial interaction capability of VR was hypothesized to promote knowledge retention and improve the instructional utility of immersive learning solutions.",
"fno": "604901a385",
"keywords": [
"Computer Aided Instruction",
"User Interfaces",
"Virtual Reality",
"Virtual Reality Learning Environments",
"Motorcycle Labeling Task",
"Conventional 2 D Learning Content",
"Immersive Environments",
"Conventional 2 D Interfaces",
"Training Scenarios",
"Immersive Learning Environments",
"Conventional Instructional Media",
"Comparison Study",
"Vocational Education",
"Immersive Learning Solutions",
"Instructional Utility",
"Knowledge Retention",
"2 D Interaction Group",
"VR Interaction Group",
"Task Analysis",
"Motorcycles",
"Training",
"Virtual Reality",
"Labeling",
"Two Dimensional Displays",
"Solid Modeling",
"Virtual Reality",
"Skill Training",
"Knowledge Retention",
"Declarative Memory",
"Performance Comparison",
"Recall Task"
],
"authors": [
{
"affiliation": "AMMACHI Labs., Amrita Vishwa Vidyapeetham, Amritapuri, India",
"fullName": "Sooraj K. Babu",
"givenName": "Sooraj",
"surname": "K. Babu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "AMMACHI Labs., Amrita Vishwa Vidyapeetham, Amritapuri, India",
"fullName": "Sooraj Krishna",
"givenName": "Sooraj",
"surname": "Krishna",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "AMMACHI Labs., Amrita Vishwa Vidyapeetham, Amritapuri, India",
"fullName": "Unnikrishnan R.",
"givenName": "Unnikrishnan",
"surname": "R.",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "AMMACHI Labs., Amrita Vishwa Vidyapeetham, Amritapuri, India",
"fullName": "Rao R. Bhavani",
"givenName": "Rao R.",
"surname": "Bhavani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "385-389",
"year": "2018",
"issn": "2161-377X",
"isbn": "978-1-5386-6049-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "604901a380",
"articleId": "12OmNwxlrkI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "604901a390",
"articleId": "12OmNApLGIP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892286",
"title": "Comparing VR and non-VR driving simulations: An experimental user study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892286/12OmNxymobo",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020057",
"title": "An Analysis of VR Technology Used in Immersive Simulations with a Serious Game Perspective",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020057/13rRUwh80Nv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a291",
"title": "Virtual Reality Observations: Using Virtual Reality to Augment Lab-Based Shoulder Surfing Research",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a291/1CJcrwDUDgk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2019/2286/0/228600a495",
"title": "The Effect of Practice Distribution on Skill Retention in Virtual Reality Temporal Bone Surgery Training",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2019/228600a495/1cdO0grsecg",
"parentPublication": {
"id": "proceedings/cbms/2019/2286/0",
"title": "2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998292",
"title": "Immersive Process Model Exploration in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998292/1hpPCy1gJoI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090482",
"title": "Immersive VR and Embodied Learning: The Role of Embodied Affordances in The Long-term Retention of Semantic Knowledge",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090482/1jIxrm4VUxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/searis/2018/6272/0/09180231",
"title": "VD1: a technical approach to a hybrid 2D and 3D desktop environment",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2018/09180231/1mK7jmn4lbO",
"parentPublication": {
"id": "proceedings/searis/2018/6272/0",
"title": "2018 IEEE 11th Workshop on Software Engineering and Architectures for Real-time Interactive Systems (SEARIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09212653",
"title": "Breaking the Screen: Interaction Across Touchscreen Boundaries in Virtual Reality for Mobile Knowledge Workers",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09212653/1nG96pJ3dKg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a078",
"title": "Modeling Emotions for Training in Immersive Simulations (METIS): A Cross-Platform Virtual Classroom Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a078/1pBMeXqNvhK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a283",
"title": "User Study on Virtual Reality for Design Reviews in Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a283/1pBMhwWNphC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdQbsLPZ6",
"doi": "10.1109/VRW55335.2022.00255",
"title": "Towards a Virtual Reality Math Game for Learning In Schools - A User Study",
"normalizedTitle": "Towards a Virtual Reality Math Game for Learning In Schools - A User Study",
"abstract": "In recent years, immersive Virtual Reality (VR) has gained popularity among young users as a new technology for entertainment gaming. While VR remains majorly used for entertainment purposes, 3D desktop games are already used in schools. This study takes a closer look at the suitability for VR games to be used in a formal educational environment, and its potential to enrich existing game based learning approaches. Based on learning needs of in particular easily distracted and inattentive children, an immersive VR math game was created and tested on 15 children aged 11–12.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, immersive Virtual Reality (VR) has gained popularity among young users as a new technology for entertainment gaming. While VR remains majorly used for entertainment purposes, 3D desktop games are already used in schools. This study takes a closer look at the suitability for VR games to be used in a formal educational environment, and its potential to enrich existing game based learning approaches. Based on learning needs of in particular easily distracted and inattentive children, an immersive VR math game was created and tested on 15 children aged 11–12.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, immersive Virtual Reality (VR) has gained popularity among young users as a new technology for entertainment gaming. While VR remains majorly used for entertainment purposes, 3D desktop games are already used in schools. This study takes a closer look at the suitability for VR games to be used in a formal educational environment, and its potential to enrich existing game based learning approaches. Based on learning needs of in particular easily distracted and inattentive children, an immersive VR math game was created and tested on 15 children aged 11–12.",
"fno": "840200a808",
"keywords": [
"Computer Aided Instruction",
"Computer Games",
"Paediatrics",
"Virtual Reality",
"VR Games",
"Formal Educational Environment",
"Existing Game",
"Learning Approaches",
"Immersive VR Math Game",
"Virtual Reality Math Game",
"Immersive Virtual Reality",
"Young Users",
"Entertainment Gaming",
"Entertainment Purposes",
"3D Desktop Games",
"Three Dimensional Displays",
"Conferences",
"Entertainment Industry",
"Games",
"Virtual Reality",
"User Interfaces",
"Aging",
"Immersive Virtual Reality",
"Game Based Learning",
"Applied Immersive Gaming",
"Serious Games"
],
"authors": [
{
"affiliation": "NZ University of Canterbury,HIT Lab",
"fullName": "Meike Belter",
"givenName": "Meike",
"surname": "Belter",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NZ University of Canterbury,HIT Lab",
"fullName": "Heide Lukosch",
"givenName": "Heide",
"surname": "Lukosch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "808-809",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJdQ7Xl6lG",
"name": "pvrw202284020-09757619s1-mm_840200a808.zip",
"size": "530 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757619s1-mm_840200a808.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a806",
"articleId": "1CJcNcP5uEg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a810",
"articleId": "1CJfavqESje",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2014/4038/0/4038a662",
"title": "A Survey of Frameworks and Game Engines for Serious Game Development",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2014/4038a662/12OmNxxNbRP",
"parentPublication": {
"id": "proceedings/icalt/2014/4038/0",
"title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2015/8843/0/8843a001",
"title": "An Indoor Navigation System for Live-Action Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2015/8843a001/12OmNy5hRdE",
"parentPublication": {
"id": "proceedings/sbgames/2015/8843/0",
"title": "2015 14th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446575",
"title": "Social Presence and Cooperation in Large-Scale Multi-User Virtual Reality - The Relevance of Social Interdependence for Location-Based Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446575/13bd1eOELLA",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2900/0/04272088",
"title": "An Immersive Game for K-5 Math and Science Education",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/04272088/17D45X7VTfV",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699256",
"title": "3rd Virtual and Augmented Reality for Good (VAR4Good) Workshop",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699256/19F1VrcNC7u",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798014",
"title": "Project Butterfly: Synergizing Immersive Virtual Reality with Actuated Soft Exosuit for Upper-Extremity Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798014/1cJ19OsmFAk",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090421",
"title": "Analysis of Interaction Spaces for VR in Public Transport Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090421/1jIxr9dj52o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a689",
"title": "Virtual/Mixed Reality Control of a Game Through Scratch",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a689/1rSRaGM24OA",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a195",
"title": "Novel Augmented Reality Enhanced Solution towards Vocational Training for People with Mental Disabilities",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a195/1yeQGgQscAU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a453",
"title": "Using Context and Physiological Cues to Improve Emotion Recognition in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a453/1yfxJ6xhCww",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0FJEiRHO",
"doi": "10.1109/VR.2019.8797755",
"title": "Virtual Reality Instruction Followed by Enactment Can Increase Procedural Knowledge in a Science Lesson",
"normalizedTitle": "Virtual Reality Instruction Followed by Enactment Can Increase Procedural Knowledge in a Science Lesson",
"abstract": "A 2×2 between-subjects experiment (a) investigated and compared the instructional effectiveness of immersive virtual reality (VR) versus video as media for teaching scientific procedural knowledge, and (b) examined the efficacy of enactment as a generative learning strategy in combination with the respective instructional media. A total of 117 high school students (74 females) were randomly distributed across four instructional groups - VR and enactment, video and enactment, only VR, and only video. Outcome measures included declarative knowledge, procedural knowledge, knowledge transfer, and subjective ratings of perceived enjoyment. Results indicated that there were no main effects or interactions for the outcomes of declarative knowledge or transfer. However, there was a significant interaction between media and method for the outcome of procedural knowledge with the VR and enactment group having the highest performance. Furthermore, media also seemed to have a significant effect on student perceived enjoyment, indicating that the groups enjoyed the VR simulation significantly more than the video. The results deepen our understanding of how we learn with immersive technology, as well as suggest important implications for implementing VR in schools.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A 2×2 between-subjects experiment (a) investigated and compared the instructional effectiveness of immersive virtual reality (VR) versus video as media for teaching scientific procedural knowledge, and (b) examined the efficacy of enactment as a generative learning strategy in combination with the respective instructional media. A total of 117 high school students (74 females) were randomly distributed across four instructional groups - VR and enactment, video and enactment, only VR, and only video. Outcome measures included declarative knowledge, procedural knowledge, knowledge transfer, and subjective ratings of perceived enjoyment. Results indicated that there were no main effects or interactions for the outcomes of declarative knowledge or transfer. However, there was a significant interaction between media and method for the outcome of procedural knowledge with the VR and enactment group having the highest performance. Furthermore, media also seemed to have a significant effect on student perceived enjoyment, indicating that the groups enjoyed the VR simulation significantly more than the video. The results deepen our understanding of how we learn with immersive technology, as well as suggest important implications for implementing VR in schools.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A 2×2 between-subjects experiment (a) investigated and compared the instructional effectiveness of immersive virtual reality (VR) versus video as media for teaching scientific procedural knowledge, and (b) examined the efficacy of enactment as a generative learning strategy in combination with the respective instructional media. A total of 117 high school students (74 females) were randomly distributed across four instructional groups - VR and enactment, video and enactment, only VR, and only video. Outcome measures included declarative knowledge, procedural knowledge, knowledge transfer, and subjective ratings of perceived enjoyment. Results indicated that there were no main effects or interactions for the outcomes of declarative knowledge or transfer. However, there was a significant interaction between media and method for the outcome of procedural knowledge with the VR and enactment group having the highest performance. Furthermore, media also seemed to have a significant effect on student perceived enjoyment, indicating that the groups enjoyed the VR simulation significantly more than the video. The results deepen our understanding of how we learn with immersive technology, as well as suggest important implications for implementing VR in schools.",
"fno": "08797755",
"keywords": [
"Computer Aided Instruction",
"Teaching",
"Virtual Reality",
"Generative Learning Strategy",
"Instructional Groups",
"Declarative Knowledge",
"Knowledge Transfer",
"Student Perceived Enjoyment",
"VR Simulation",
"Immersive Virtual Reality",
"Instructional Media",
"High School Students",
"Virtual Reality Instruction",
"Scientific Procedural Knowledge Teaching",
"Science Lesson",
"Media",
"Biological System Modeling",
"Virtual Reality",
"Education",
"Solid Modeling",
"Psychology",
"Knowledge Transfer",
"Virtual Reality",
"Generative Learning Strategy",
"Enactment",
"Learning",
"Procedural Knowledge"
],
"authors": [
{
"affiliation": "Aalborg University",
"fullName": "Niels Koch Andreasen",
"givenName": "Niels Koch",
"surname": "Andreasen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Copenhagen",
"fullName": "Sarune Baceviciute",
"givenName": "Sarune",
"surname": "Baceviciute",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Roskilde University",
"fullName": "Prajakt Pande",
"givenName": "Prajakt",
"surname": "Pande",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University",
"fullName": "Guido Makransky",
"givenName": "Guido",
"surname": "Makransky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "840-841",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797981",
"articleId": "1cJ1fWzOe8U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797785",
"articleId": "1cJ0HqCLp96",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2018/6049/0/604901a385",
"title": "Virtual Reality Learning Environments for Vocational Education: A Comparison Study with Conventional Instructional Media on Knowledge Retention",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a385/12OmNzVoBD5",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446505",
"title": "Keynote Speaker is Clinical Virtual Reality Ready for Primetime?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446505/13bd1gzWkRi",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493426",
"title": "How Real Can Virtual Become?",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493426/14tNJnWdtTy",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2022/9248/0/924800a037",
"title": "Investigating the Effect of Immersive VR on Conceptual Knowledge and Procedural Knowledge Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2022/924800a037/1MrSPQhOcgM",
"parentPublication": {
"id": "proceedings/eitt/2022/9248/0",
"title": "2022 Eleventh International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a311",
"title": "Affective Virtual Reality System (AVRS): Design and Ratings of Affective VR Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a311/1ap5C3hrD6o",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797972",
"title": "Evaluation of Maslows Hierarchy of Needs on Long-Term Use of HMDs – A Case Study of Office Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797972/1cJ0V5mcpB6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797805",
"title": "Encouraging Rehabilitation Trials: The Potential of 360° Immersive Instruction Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797805/1cJ13iaKgve",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797828",
"title": "Immersive Virtual Reality and Gamification Within Procedurally Generated Environments to Increase Motivation During Gait Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797828/1cJ13n6aEsE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smc-it/2019/1545/0/154500a070",
"title": "Procedural Generation of 3D Planetary-Scale Terrains",
"doi": null,
"abstractUrl": "/proceedings-article/smc-it/2019/154500a070/1e10ss3Ote8",
"parentPublication": {
"id": "proceedings/smc-it/2019/1545/0",
"title": "2019 IEEE International Conference on Space Mission Challenges for Information Technology (SMC-IT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09095367",
"title": "PICO: Procedural Iterative Constrained Optimizer for Geometric Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09095367/1jVMiYPPf0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxukBU3g4",
"doi": "10.1109/VRW50115.2020.00089",
"title": "Using Screen Capture Video to Understand Learning in Virtual Reality",
"normalizedTitle": "Using Screen Capture Video to Understand Learning in Virtual Reality",
"abstract": "Research on immersive learning in natural setting such as schools, especially with/in virtual reality (VR), is only just emerging. While current inquiry concentrates on measuring content and procedural knowledge acquisition and to a lesser extent attitudinal change associated with exposure to VR applications, scant attention has been paid to what learners actually do in immersive virtual environments. This includes on- and off-task behavior, higher order thinking, and regulation of learning related to self and others. This methodological paper describes the use of screen capture video as a means of recording and analysing these types of learning behaviors in virtual environments with junior high school students. The paper outlines barriers, limitations and benefits to using screen capture video and the types of inductive and deductive analysis conducted as part of the research. It concludes that screen capture video is a promising medium for understanding verbal and non-verbal learning behavior as it actually unfolds in immersive VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Research on immersive learning in natural setting such as schools, especially with/in virtual reality (VR), is only just emerging. While current inquiry concentrates on measuring content and procedural knowledge acquisition and to a lesser extent attitudinal change associated with exposure to VR applications, scant attention has been paid to what learners actually do in immersive virtual environments. This includes on- and off-task behavior, higher order thinking, and regulation of learning related to self and others. This methodological paper describes the use of screen capture video as a means of recording and analysing these types of learning behaviors in virtual environments with junior high school students. The paper outlines barriers, limitations and benefits to using screen capture video and the types of inductive and deductive analysis conducted as part of the research. It concludes that screen capture video is a promising medium for understanding verbal and non-verbal learning behavior as it actually unfolds in immersive VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Research on immersive learning in natural setting such as schools, especially with/in virtual reality (VR), is only just emerging. While current inquiry concentrates on measuring content and procedural knowledge acquisition and to a lesser extent attitudinal change associated with exposure to VR applications, scant attention has been paid to what learners actually do in immersive virtual environments. This includes on- and off-task behavior, higher order thinking, and regulation of learning related to self and others. This methodological paper describes the use of screen capture video as a means of recording and analysing these types of learning behaviors in virtual environments with junior high school students. The paper outlines barriers, limitations and benefits to using screen capture video and the types of inductive and deductive analysis conducted as part of the research. It concludes that screen capture video is a promising medium for understanding verbal and non-verbal learning behavior as it actually unfolds in immersive VR.",
"fno": "09090623",
"keywords": [
"Task Analysis",
"Encoding",
"Virtual Environments",
"Human Computer Interaction",
"Collaboration",
"Headphones",
"Virtual Reality",
"School",
"Learning",
"STEM",
"Science Education",
"Children",
"Observational Method",
"Learning Regulation",
"Human Centred Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Social And Professional Topics",
"User Characteristics",
"Age",
"Children",
"Human Centred Computing",
"Human Computer Interaction HCI",
"HCI Design And Evaluation Methods",
"Field Studies"
],
"authors": [
{
"affiliation": "University of Newcastle,Australia",
"fullName": "Erica Southgate",
"givenName": "Erica",
"surname": "Southgate",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "418-421",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090525",
"articleId": "1jIxjKMQpIk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090458",
"articleId": "1jIxiPxPIpG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/computationworld/2009/3862/0/3862a460",
"title": "Depth Perception within Virtual Environments: A Comparative Study Between Wide Screen Stereoscopic Displays and Head Mounted Devices",
"doi": null,
"abstractUrl": "/proceedings-article/computationworld/2009/3862a460/12OmNwJPN1e",
"parentPublication": {
"id": "proceedings/computationworld/2009/3862/0",
"title": "Future Computing, Service Computation, Cognitive, Adaptive, Content, Patterns, Computation World",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504761",
"title": "Avatar realism and social interaction quality in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2022/7180/0/718000a728",
"title": "Virtual Reality-Based Gymnastics Visualization Using Real-Time Motion Capture Suit",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2022/718000a728/1JeEp76ujg4",
"parentPublication": {
"id": "proceedings/mass/2022/7180/0",
"title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089551",
"title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089551/1jIx95ncylO",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089482",
"title": "Think Twice: The Influence of Immersion on Decision Making during Gambling in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089482/1jIxeHgBEkg",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090516",
"title": "Investigating Trainees’ Nonverbal Behaviors in Virtual Patients Communication in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090516/1jIxjtFwen6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a361",
"title": "Pen-based Interaction with Spreadsheets in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a361/1pysxojAVAk",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a068",
"title": "Verbal Mimicry Predicts Social Distance and Social Attraction to an Outgroup Member in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a068/1qpzC44fheg",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2020/9636/0/963600a112",
"title": "Research on the Visual Language of VR Animation in the Multi-screen Interactive Era",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2020/963600a112/1x4YZwzXQre",
"parentPublication": {
"id": "proceedings/icvris/2020/9636/0",
"title": "2020 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2021/3144/0/314400a012",
"title": "CodeCity: On-Screen or in Virtual Reality?",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2021/314400a012/1yrHskxcdl6",
"parentPublication": {
"id": "proceedings/vissoft/2021/3144/0",
"title": "2021 Working Conference on Software Visualization (VISSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pBMeBWXAZ2",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pBMeXqNvhK",
"doi": "10.1109/ISMAR-Adjunct51615.2020.00036",
"title": "Modeling Emotions for Training in Immersive Simulations (METIS): A Cross-Platform Virtual Classroom Study",
"normalizedTitle": "Modeling Emotions for Training in Immersive Simulations (METIS): A Cross-Platform Virtual Classroom Study",
"abstract": "Virtual training environments (VTEs) using immersive technology have been able to successfully provide training for technical skills. Combined with recent advances in virtual social agent technologies and in affective computing, VTEs can now also support the training of social skills. Research looking at the effects of different immersive technologies on users' experience (UX) can provide important insights about their impact on user's engagement with the technology, sense presence and co-presence. However, current studies do not address whether emotions displayed by virtual agents provide the same level of UX across different virtual reality (VR) platforms. In this study, we considered a virtual classroom simulator built for desktop computer, and adapted for an immersive VR platform (CAVE). Users interact with virtual animated disruptive students able to display facial expressions, to help them practice their classroom behavior management skills. We assessed effects of the VR platforms and of the display of facial expressions on presence, co-presence, engagement, and believability. Results indicate that users were engaged, found the virtual students believable and felt presence and co-presence for both VR platforms. We also observed an interaction effects of facial expressions and VR platforms for co-presence (p = .018 <; .05).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual training environments (VTEs) using immersive technology have been able to successfully provide training for technical skills. Combined with recent advances in virtual social agent technologies and in affective computing, VTEs can now also support the training of social skills. Research looking at the effects of different immersive technologies on users' experience (UX) can provide important insights about their impact on user's engagement with the technology, sense presence and co-presence. However, current studies do not address whether emotions displayed by virtual agents provide the same level of UX across different virtual reality (VR) platforms. In this study, we considered a virtual classroom simulator built for desktop computer, and adapted for an immersive VR platform (CAVE). Users interact with virtual animated disruptive students able to display facial expressions, to help them practice their classroom behavior management skills. We assessed effects of the VR platforms and of the display of facial expressions on presence, co-presence, engagement, and believability. Results indicate that users were engaged, found the virtual students believable and felt presence and co-presence for both VR platforms. We also observed an interaction effects of facial expressions and VR platforms for co-presence (p = .018 <; .05).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual training environments (VTEs) using immersive technology have been able to successfully provide training for technical skills. Combined with recent advances in virtual social agent technologies and in affective computing, VTEs can now also support the training of social skills. Research looking at the effects of different immersive technologies on users' experience (UX) can provide important insights about their impact on user's engagement with the technology, sense presence and co-presence. However, current studies do not address whether emotions displayed by virtual agents provide the same level of UX across different virtual reality (VR) platforms. In this study, we considered a virtual classroom simulator built for desktop computer, and adapted for an immersive VR platform (CAVE). Users interact with virtual animated disruptive students able to display facial expressions, to help them practice their classroom behavior management skills. We assessed effects of the VR platforms and of the display of facial expressions on presence, co-presence, engagement, and believability. Results indicate that users were engaged, found the virtual students believable and felt presence and co-presence for both VR platforms. We also observed an interaction effects of facial expressions and VR platforms for co-presence (p = .018 <; .05).",
"fno": "767500a078",
"keywords": [
"Affective Computing",
"Computer Based Training",
"Computer Simulation",
"User Experience",
"Virtual Reality",
"Virtual Classroom Simulator",
"Desktop Computer",
"Immersive VR Platform",
"Virtual Animated Disruptive Students",
"Facial Expressions",
"Classroom Behavior Management Skills",
"Virtual Students",
"Interaction Effects",
"Cross Platform Virtual Classroom Study",
"Virtual Training Environments",
"VT Es",
"Technical Skills",
"Virtual Social Agent Technologies",
"Affective Computing",
"Social Skills",
"Sense Presence",
"Virtual Agents",
"Modeling Emotions For Training In Immersive Simulations",
"METIS",
"Users Experience",
"User Engagement",
"CAVE",
"Training",
"Solid Modeling",
"Affective Computing",
"Resists",
"Augmented Reality",
"Immersive Virtual Environment",
"Virtual Reality",
"Affective Computing",
"User Experience",
"Evaluation",
"Classroom Simulations"
],
"authors": [
{
"affiliation": "Florida International University,VISAGE Lab, SCIS",
"fullName": "Alban Delamarre",
"givenName": "Alban",
"surname": "Delamarre",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Florida International University,VISAGE Lab, SCIS",
"fullName": "Christine Lisetti",
"givenName": "Christine",
"surname": "Lisetti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LAB-STICC CNRS, ENIB",
"fullName": "Cédric Buche",
"givenName": "Cédric",
"surname": "Buche",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "78-83",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7675-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "767500a076",
"articleId": "1pBMiNAfDpK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "767500a084",
"articleId": "1pBMjFD8jVm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "mags/cg/2018/02/mcg2018020057",
"title": "An Analysis of VR Technology Used in Immersive Simulations with a Serious Game Perspective",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020057/13rRUwh80Nv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2021/02/08675340",
"title": "Immersive Virtual Reality Attacks and the Human Joystick",
"doi": null,
"abstractUrl": "/journal/tq/2021/02/08675340/18K0AX3AgRW",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a013",
"title": "The Impact of Non-immersive Virtual Reality Technologies on Consumers' Behaviors in real estate: A Website's Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a013/1J7W7B41hyE",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a085",
"title": "MEinVR: Multimodal Interaction Paradigms in Immersive Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a085/1J7W98ABKwM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797828",
"title": "Immersive Virtual Reality and Gamification Within Procedurally Generated Environments to Increase Motivation During Gait Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797828/1cJ13n6aEsE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2017/0459/0/07938144",
"title": "Creating immersive and aesthetic auditory spaces in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2017/07938144/1h0L85rWBYA",
"parentPublication": {
"id": "proceedings/sive/2017/0459/0",
"title": "2017 IEEE 3rd VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a260",
"title": "Influence of hand visualization on tool-based motor skills training in an immersive VR simulator",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a260/1pyswAXnugM",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a589",
"title": "Simulation and Assessment of Safety Procedure in an Immersive Virtual Reality (IVR) Laboratory",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a589/1tnXRaYRcdi",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/transai/2021/3412/0/341200a017",
"title": "An Immersive Model of User Trust in Conversational Agents in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/transai/2021/341200a017/1xNNxZKbIzK",
"parentPublication": {
"id": "proceedings/transai/2021/3412/0",
"title": "2021 Third International Conference on Transdisciplinary AI (TransAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2021/3326/0/09576155",
"title": "Towards an immersive visualization of consumer-level simulations of vehicular traffic",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2021/09576155/1y63CMoc3o4",
"parentPublication": {
"id": "proceedings/ds-rt/2021/3326/0",
"title": "2021 IEEE/ACM 25th International Symposium on Distributed Simulation and Real Time Applications (DS-RT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXQzeM5Tq",
"doi": "10.1109/VRW52623.2021.00244",
"title": "DC: Clinical Application of Immersive VR in Spatial Cognition: The Assessment of Spatial Memory and Unilateral Spatial Neglect in Neurological Patients",
"normalizedTitle": "DC: Clinical Application of Immersive VR in Spatial Cognition: The Assessment of Spatial Memory and Unilateral Spatial Neglect in Neurological Patients",
"abstract": "Visual-spatial impairments and associated cognitive functions are hard to examine by traditional neuropsychological tests, but they have high potential to be examined in an ecologically valid way through the simulation of spatial information provided by immersive Virtual Reality (VR). This PhD research proposes two studies to investigate the clinical applicability and construct validity of immersive VR in neuropsychological rehabilitation: 1) The immersive Virtual Memory Task (imVMT) was developed to examine spatial memory in a broad range of neurological patients. The main purpose was to apply a gesture-based natural hand interaction and to see in how far typical handicaps such as hemiparesis or visual field defects interfere with task performance. On a neuropsychological level, we aim to identify VR parameters which reveal and differentiate underlying cognitive sub-processes. 2) A virtual road crossing task (iVRoad) was developed to detect discrete symptoms of unilateral spatial neglect (USN) in right-hemispheric post-stroke patients. The aims are a) to externally validate iVRoad using conventional neuropsychological tests, b) to identify and evaluate relevant behavioral and eye tracking parameters to distinguish post-stroke patients with and without USN.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual-spatial impairments and associated cognitive functions are hard to examine by traditional neuropsychological tests, but they have high potential to be examined in an ecologically valid way through the simulation of spatial information provided by immersive Virtual Reality (VR). This PhD research proposes two studies to investigate the clinical applicability and construct validity of immersive VR in neuropsychological rehabilitation: 1) The immersive Virtual Memory Task (imVMT) was developed to examine spatial memory in a broad range of neurological patients. The main purpose was to apply a gesture-based natural hand interaction and to see in how far typical handicaps such as hemiparesis or visual field defects interfere with task performance. On a neuropsychological level, we aim to identify VR parameters which reveal and differentiate underlying cognitive sub-processes. 2) A virtual road crossing task (iVRoad) was developed to detect discrete symptoms of unilateral spatial neglect (USN) in right-hemispheric post-stroke patients. The aims are a) to externally validate iVRoad using conventional neuropsychological tests, b) to identify and evaluate relevant behavioral and eye tracking parameters to distinguish post-stroke patients with and without USN.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual-spatial impairments and associated cognitive functions are hard to examine by traditional neuropsychological tests, but they have high potential to be examined in an ecologically valid way through the simulation of spatial information provided by immersive Virtual Reality (VR). This PhD research proposes two studies to investigate the clinical applicability and construct validity of immersive VR in neuropsychological rehabilitation: 1) The immersive Virtual Memory Task (imVMT) was developed to examine spatial memory in a broad range of neurological patients. The main purpose was to apply a gesture-based natural hand interaction and to see in how far typical handicaps such as hemiparesis or visual field defects interfere with task performance. On a neuropsychological level, we aim to identify VR parameters which reveal and differentiate underlying cognitive sub-processes. 2) A virtual road crossing task (iVRoad) was developed to detect discrete symptoms of unilateral spatial neglect (USN) in right-hemispheric post-stroke patients. The aims are a) to externally validate iVRoad using conventional neuropsychological tests, b) to identify and evaluate relevant behavioral and eye tracking parameters to distinguish post-stroke patients with and without USN.",
"fno": "405700a723",
"keywords": [
"Cognition",
"Gesture Recognition",
"Handicapped Aids",
"Medical Computing",
"Neurophysiology",
"Patient Rehabilitation",
"Virtual Reality",
"Immersive VR",
"Spatial Cognition",
"Spatial Memory",
"Unilateral Spatial Neglect",
"Neurological Patients",
"Visual Spatial Impairments",
"Cognitive Functions",
"Immersive Virtual Reality",
"Neuropsychological Rehabilitation",
"Immersive Virtual Memory Task",
"Gesture Based Natural Hand Interaction",
"Virtual Road Crossing Task",
"Post Stroke Patients",
"I V Road",
"Handicaps",
"Hemiparesis",
"Visualization",
"Solid Modeling",
"Three Dimensional Displays",
"Cognitive Processes",
"Conferences",
"Roads",
"Virtual Reality",
"Spatial Memory",
"Neglect",
"Eye Tracking",
"VR"
],
"authors": [
{
"affiliation": "University Hospital Leipzig,Max Planck Institute for Human Cognitive and Brain Sciences Clinic for Cognitive Neurology",
"fullName": "Julia Belger",
"givenName": "Julia",
"surname": "Belger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "723-724",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnXQwe02iY",
"name": "pvrw202140570-09419329s1-mm_405700a723.zip",
"size": "283 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419329s1-mm_405700a723.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a721",
"articleId": "1tnWQUIqzza",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a725",
"articleId": "1tnWFqeYuv6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2011/0039/0/05759427",
"title": "Keynote address: Bringing 3D immersive virtual reality technologies to visual-spatial cognition research",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759427/12OmNqBbHOj",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a385",
"title": "Virtual Reality Learning Environments for Vocational Education: A Comparison Study with Conventional Instructional Media on Knowledge Retention",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a385/12OmNzVoBD5",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-iscmht/2017/1023/0/08338601",
"title": "Motion analysis for unilateral spatial neglect in computational system rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-iscmht/2017/08338601/12OmNzuZUxB",
"parentPublication": {
"id": "proceedings/iciev-iscmht/2017/1023/0",
"title": "2017 6th International Conference on Informatics, Electronics and Vision & 2017 7th International Symposium in Computational Medical and Health Technology (ICIEV-ISCMHT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/05/mcg1995050046",
"title": "Trade-Off Between Resolution and Interactivity in Spatial Task Performance",
"doi": null,
"abstractUrl": "/magazine/cg/1995/05/mcg1995050046/13rRUIIVleM",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020057",
"title": "An Analysis of VR Technology Used in Immersive Simulations with a Serious Game Perspective",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020057/13rRUwh80Nv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a960",
"title": "[DC] Exploration of Context and Physiological Cues for Personalized Emotion-Adaptive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a960/1CJexFbyxUI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a223",
"title": "Designing, Prototyping and Testing of <tex>Z_$360^{\\circ}$_Z</tex> Spatial Audio Conferencing for Virtual Tours",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a223/1J7WxlL5W6Y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049716",
"title": "How to Maximise Spatial Presence: Design Guidelines for a Virtual Learning Environment for School Use",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049716/1KYooSSVjsk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798171",
"title": "[DC] Dimensionality of Augmented Reality Spatial Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798171/1cJ0UKlSMP6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a588",
"title": "Multi-modal Spatial Object Localization in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a588/1tuAGAPl3Tq",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.