data dict |
|---|
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysjIlsYus",
"doi": "10.1109/ISMAR-Adjunct.2019.000-3",
"title": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"normalizedTitle": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"abstract": "We present a wearable Mixed Reality (MR) remote collaboration system called Wearable RemoteFusion. The system supports spatial annotation and view frustum sharing, and enables natural non-verbal communication cues (eye gaze and hand gesture) for visual assistance in a stitched live dense scene. We describe the design and implementation details of the prototype system, and report on a pilot user study investigating how sharing natural gaze and gesture cues affects collaborative performance and the user experience. We found that by sharing augmented natural cues like the local eye gaze and remote hand gesture, participants had a stronger feeling of Co-presence, and the remote user could guide the local user to complete tasks with less physical workload. We discuss implications for collaborative interface design and directions for future research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a wearable Mixed Reality (MR) remote collaboration system called Wearable RemoteFusion. The system supports spatial annotation and view frustum sharing, and enables natural non-verbal communication cues (eye gaze and hand gesture) for visual assistance in a stitched live dense scene. We describe the design and implementation details of the prototype system, and report on a pilot user study investigating how sharing natural gaze and gesture cues affects collaborative performance and the user experience. We found that by sharing augmented natural cues like the local eye gaze and remote hand gesture, participants had a stronger feeling of Co-presence, and the remote user could guide the local user to complete tasks with less physical workload. We discuss implications for collaborative interface design and directions for future research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a wearable Mixed Reality (MR) remote collaboration system called Wearable RemoteFusion. The system supports spatial annotation and view frustum sharing, and enables natural non-verbal communication cues (eye gaze and hand gesture) for visual assistance in a stitched live dense scene. We describe the design and implementation details of the prototype system, and report on a pilot user study investigating how sharing natural gaze and gesture cues affects collaborative performance and the user experience. We found that by sharing augmented natural cues like the local eye gaze and remote hand gesture, participants had a stronger feeling of Co-presence, and the remote user could guide the local user to complete tasks with less physical workload. We discuss implications for collaborative interface design and directions for future research.",
"fno": "476500a393",
"keywords": [
"Augmented Reality",
"Gesture Recognition",
"Groupware",
"Human Computer Interaction",
"Spatial Annotation",
"View Frustum Sharing",
"Nonverbal Communication Cues",
"Visual Assistance",
"Wearable Mixed Reality Remote Collaboration System",
"Wearable Remote Fusion",
"Remote Hand Gesture Sharing",
"Collaborative Interface Design",
"Local Eye Gaze",
"Augmented Natural Cues",
"User Experience",
"Collaborative Performance",
"Gesture Cues",
"Natural Gaze",
"Stitched Live Dense Scene",
"Collaboration",
"Virtual Reality",
"Task Analysis",
"Three Dimensional Displays",
"Annotations",
"Biomedical Engineering",
"Mixed Reality Augmented Reality Remote Collaboration Eye Gaze Hand Gesture"
],
"authors": [
{
"affiliation": "Auckland Bioengineering Institute, University of Auckland",
"fullName": "Prasanth Sasikumar",
"givenName": "Prasanth",
"surname": "Sasikumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury",
"fullName": "Lei Gao",
"givenName": "Lei",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Auckland Bioengineering Institute, University of Auckland",
"fullName": "Huidong Bai",
"givenName": "Huidong",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Auckland Bioengineering Institute, University of Auckland",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "393-394",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a387",
"articleId": "1gyslQzq07K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a395",
"articleId": "1gyskQ3YBeU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671795",
"title": "Study of augmented gesture communication cues and view sharing in remote collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671795/12OmNwl8GBu",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a079",
"title": "[POSTER] Mutually Shared Gaze in Augmented Video Conference",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a079/12OmNyQYt9o",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523400",
"title": "Do You See What I See? The Effect of Gaze Tracking on Task Space Remote Collaboration",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523400/13rRUy0HYRu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a956",
"title": "[DC]Using Multimodal Input in Augmented Virtual Teleportation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a956/1CJcYgs1MY0",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a250",
"title": "Using Speech to Visualise Shared Gaze Cues in MR Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a250/1CJcnpSVomk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a837",
"title": "Comparing Gaze-Supported Modalities with Empathic Mixed Reality Interfaces in Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a837/1JrRgMzkUBq",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798327",
"title": "Eye-gaze-triggered Visual Cues to Restore Attention in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798327/1cJ0HmmdfUY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798024",
"title": "Head Pointer or Eye Gaze: Which Helps More in MR Remote Collaboration?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798024/1cJ0MmguvG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a473",
"title": "The Impact of Gaze Cues in Mixed Reality Collaborations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a473/1yeQCejb7Co",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyslGAU3Ha",
"doi": "10.1109/ISMAR-Adjunct.2019.00-66",
"title": "VesARlius: An Augmented Reality System for Large-Group Co-located Anatomy Learning",
"normalizedTitle": "VesARlius: An Augmented Reality System for Large-Group Co-located Anatomy Learning",
"abstract": "Interactive educational environments are one of the prime applications for the use of Augmented Reality (AR). A large variety of such systems has been proposed in the past for various areas of education. However, in most cases the number of users these AR systems can support is limited. Only few systems have been developed that support a large number of co-located users to jointly collaborate in a dynamic and interactive learning environment. Multi-user AR collaboration presents a unique setting with distinct challenges and requirements for user interaction and information sharing. In this paper, we present VesARlius, a novel AR system for collaborative and interactive anatomy learning in a large group of co-located users. Our system employs a set of multi-user collaboration paradigms allowing users to engage in an interactive AR learning environment. We evaluated the collaborative features of our system in a user study with 16 medical students. Results demonstrate the potential of the VesARlius system to be used effectively for large-group AR anatomy learning. From our lessons learned, we provide a set of design guidelines for developing similar AR systems to enable large-group collaboration in other application domains.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Interactive educational environments are one of the prime applications for the use of Augmented Reality (AR). A large variety of such systems has been proposed in the past for various areas of education. However, in most cases the number of users these AR systems can support is limited. Only few systems have been developed that support a large number of co-located users to jointly collaborate in a dynamic and interactive learning environment. Multi-user AR collaboration presents a unique setting with distinct challenges and requirements for user interaction and information sharing. In this paper, we present VesARlius, a novel AR system for collaborative and interactive anatomy learning in a large group of co-located users. Our system employs a set of multi-user collaboration paradigms allowing users to engage in an interactive AR learning environment. We evaluated the collaborative features of our system in a user study with 16 medical students. Results demonstrate the potential of the VesARlius system to be used effectively for large-group AR anatomy learning. From our lessons learned, we provide a set of design guidelines for developing similar AR systems to enable large-group collaboration in other application domains.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Interactive educational environments are one of the prime applications for the use of Augmented Reality (AR). A large variety of such systems has been proposed in the past for various areas of education. However, in most cases the number of users these AR systems can support is limited. Only few systems have been developed that support a large number of co-located users to jointly collaborate in a dynamic and interactive learning environment. Multi-user AR collaboration presents a unique setting with distinct challenges and requirements for user interaction and information sharing. In this paper, we present VesARlius, a novel AR system for collaborative and interactive anatomy learning in a large group of co-located users. Our system employs a set of multi-user collaboration paradigms allowing users to engage in an interactive AR learning environment. We evaluated the collaborative features of our system in a user study with 16 medical students. Results demonstrate the potential of the VesARlius system to be used effectively for large-group AR anatomy learning. From our lessons learned, we provide a set of design guidelines for developing similar AR systems to enable large-group collaboration in other application domains.",
"fno": "476500a122",
"keywords": [
"Augmented Reality",
"Biology Computing",
"Computer Aided Instruction",
"Groupware",
"Interactive Systems",
"Medical Computing",
"Multiuser AR Collaboration",
"User Interaction",
"Information Sharing",
"AR System",
"Collaborative Anatomy",
"Interactive Anatomy",
"Multiuser Collaboration Paradigms",
"Interactive AR Learning Environment",
"Collaborative Features",
"Ves A Rlius System",
"Augmented Reality System",
"Interactive Educational Environments",
"Dynamic Learning Environment",
"Interactive Learning Environment",
"Co Located Anatomy Learning",
"Collaboration",
"Synchronization",
"Pins",
"Three Dimensional Displays",
"Solid Modeling",
"Augmented Reality"
],
"authors": [
{
"affiliation": "Technische Universität München",
"fullName": "Felix Bork",
"givenName": "Felix",
"surname": "Bork",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München",
"fullName": "Alexander Lehner",
"givenName": "Alexander",
"surname": "Lehner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ludwig-Maximilians Universität",
"fullName": "Daniela Kugelmann",
"givenName": "Daniela",
"surname": "Kugelmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München",
"fullName": "Ulrich Eck",
"givenName": "Ulrich",
"surname": "Eck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ludwig-Maximilians Universität",
"fullName": "Jens Waschke",
"givenName": "Jens",
"surname": "Waschke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München",
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "122-123",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a118",
"articleId": "1gysk60HxPW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a124",
"articleId": "1gysnb0tidq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836520",
"title": "First Deployment of Diminished Reality for Anatomy Education",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836520/12OmNAYGlBY",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icimt/2009/3922/0/3922a019",
"title": "Collaborative Augmented Reality Approach for Multi-user Interaction in Urban Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icimt/2009/3922a019/12OmNwDACCo",
"parentPublication": {
"id": "proceedings/icimt/2009/3922/0",
"title": "Information and Multimedia Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836453",
"title": "Challenges for Asynchronous Collaboration in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836453/12OmNxaw5c0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948517",
"title": "Collaboration in mediated and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948517/12OmNy6HQPU",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802077",
"title": "An AR edutainment system supporting bone anatomy learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802077/12OmNylKAKS",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a169",
"title": "Empirical Study of Non-Reversing Magic Mirrors for Augmented Reality Anatomy Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a169/12OmNyprnqS",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2012/2719/0/06337162",
"title": "Simulation Teaching in 3D Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2012/06337162/12OmNzdGnxC",
"parentPublication": {
"id": "proceedings/iiai-aai/2012/2719/0",
"title": "2012 IIAI International Conference on Advanced Applied Informatics (IIAIAAI 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446450",
"title": "Augmented Reality-Based Personalized Virtual Operative Anatomy for Neurosurgical Guidance and Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446450/13bd1gQYgEs",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/10/ttg2011101380",
"title": "Cross-Organizational Collaboration Supported by Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2011/10/ttg2011101380/13rRUxASuMz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/laclo/2018/0382/0/038200a476",
"title": "Augmented Reality Learning Resources in Anatomy",
"doi": null,
"abstractUrl": "/proceedings-article/laclo/2018/038200a476/1cdOkT11fDG",
"parentPublication": {
"id": "proceedings/laclo/2018/0382/0",
"title": "2018 XIII Latin American Conference on Learning Technologies (LACLO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxaFnm0GQ",
"doi": "10.1109/VR46266.2020.00038",
"title": "Enlightening Patients with Augmented Reality",
"normalizedTitle": "Enlightening Patients with Augmented Reality",
"abstract": "Enlightening Patients with Augmented Reality (EPAR) enhances patient education with new possibilities offered by Augmented Reality. Medical procedures are becoming increasingly complex and printed information sheets are often hard to understand for patients. EPAR developed an augmented reality prototype that helps patients with strabismus to better understand the processes of examinations and eye surgeries. By means of interactive storytelling, three identified target groups based on user personas were able to adjust the level of information transfer based on their interests. We performed a 2-phase evaluation with a total of 24 test subjects, resulting in a final system usability score of 80.0. For interaction prompts concerning virtual 3D content, visual highlights were considered to be sufficient. Overall, participants thought that an AR system as a complementary tool could lead to a better understanding of medical procedures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Enlightening Patients with Augmented Reality (EPAR) enhances patient education with new possibilities offered by Augmented Reality. Medical procedures are becoming increasingly complex and printed information sheets are often hard to understand for patients. EPAR developed an augmented reality prototype that helps patients with strabismus to better understand the processes of examinations and eye surgeries. By means of interactive storytelling, three identified target groups based on user personas were able to adjust the level of information transfer based on their interests. We performed a 2-phase evaluation with a total of 24 test subjects, resulting in a final system usability score of 80.0. For interaction prompts concerning virtual 3D content, visual highlights were considered to be sufficient. Overall, participants thought that an AR system as a complementary tool could lead to a better understanding of medical procedures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Enlightening Patients with Augmented Reality (EPAR) enhances patient education with new possibilities offered by Augmented Reality. Medical procedures are becoming increasingly complex and printed information sheets are often hard to understand for patients. EPAR developed an augmented reality prototype that helps patients with strabismus to better understand the processes of examinations and eye surgeries. By means of interactive storytelling, three identified target groups based on user personas were able to adjust the level of information transfer based on their interests. We performed a 2-phase evaluation with a total of 24 test subjects, resulting in a final system usability score of 80.0. For interaction prompts concerning virtual 3D content, visual highlights were considered to be sufficient. Overall, participants thought that an AR system as a complementary tool could lead to a better understanding of medical procedures.",
"fno": "09089476",
"keywords": [
"Augmented Reality",
"Computer Aided Instruction",
"Eye",
"Medical Computing",
"Surgery",
"Virtual Reality",
"EPAR",
"Patient Education",
"Medical Procedures",
"Enlightening Patients With Augmented Reality",
"Interactive Storytelling",
"Education",
"Three Dimensional Displays",
"Augmented Reality",
"Surgery",
"Human Computer Interaction",
"Usability",
"Prototypes",
"Human Centered Computing",
"Mixed Augmented Reality Human Centered Computing",
"Interface Design Prototyping Human Centered Computing",
"Interaction Design Theory",
"Concepts And Paradigms Human Centered Computing",
"Usability Testing"
],
"authors": [
{
"affiliation": "University of Applied Sciences,Institute of Creative\\Media/Technologies,St. Pölten,Austria",
"fullName": "Andreas Jakl",
"givenName": "Andreas",
"surname": "Jakl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences,Institute of Creative\\Media/Technologies,St. Pölten,Austria",
"fullName": "Anna-Maria Lienhart",
"givenName": "Anna-Maria",
"surname": "Lienhart",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences,Institute of Creative\\Media/Technologies,St. Pölten,Austria",
"fullName": "Clemens Baumann",
"givenName": "Clemens",
"surname": "Baumann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences,Institute of Creative\\Media/Technologies,St. Pölten,Austria",
"fullName": "Arian Jalaeefar",
"givenName": "Arian",
"surname": "Jalaeefar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences,Institute of Creative\\Media/Technologies,St. Pölten,Austria",
"fullName": "Alexander Schlager",
"givenName": "Alexander",
"surname": "Schlager",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences,Institute of Creative\\Media/Technologies,St. Pölten,Austria",
"fullName": "Lucas Schöffer",
"givenName": "Lucas",
"surname": "Schöffer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Sciences,Institute of Creative\\Media/Technologies,St. Pölten,Austria",
"fullName": "Franziska Bruckner",
"givenName": "Franziska",
"surname": "Bruckner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "195-203",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089504",
"articleId": "1jIxfvWzz6o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089517",
"articleId": "1jIxaKcOOUU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504725",
"title": "Casting shadows: Ecological interface design for augmented reality pedestrian collision warning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504725/12OmNC8uRtR",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a275",
"title": "Towards Engaging Upper Extremity Motor Dysfunction Assessment Using Augmented Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a275/12OmNrHjqLk",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836471",
"title": "A Haptic Serious Augmented Reality Game for Motor Assessment of Parkinson's Disease Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836471/12OmNwFidfy",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770761",
"title": "Augmented Reality Interface Toolkit",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770761/12OmNyUnELp",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446457",
"title": "Memory Task Performance Across Augmented and Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446457/13bd1fph1yg",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/07/mco2012070026",
"title": "Anywhere Interfaces Using Handheld Augmented Reality",
"doi": null,
"abstractUrl": "/magazine/co/2012/07/mco2012070026/13rRUxYrbPM",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007333",
"title": "Cognitive Cost of Using Augmented Reality Displays",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007333/13rRUygT7fg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2020/03/08993789",
"title": "Augmented and Virtual Reality in Surgery",
"doi": null,
"abstractUrl": "/magazine/cs/2020/03/08993789/1hkQPiQFzsQ",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a217",
"title": "Lower Limb Balance Rehabilitation of Post-stroke Patients Using an Evaluating and Training Combined Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a217/1pBMhnkqb04",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXia3PBp6",
"doi": "10.1109/VRW52623.2021.00166",
"title": "Remote Asynchronous Collaboration in Maintenance scenarios using Augmented Reality and Annotations",
"normalizedTitle": "Remote Asynchronous Collaboration in Maintenance scenarios using Augmented Reality and Annotations",
"abstract": "This paper presents an Augmented Reality (AR) remote collaborative approach making use of different stabilized annotation features, part of an ongoing research with partners from the industry. It enables a remote expert to assist an on-site technician during asynchronous maintenance tasks. To foster the creation of a shared understanding, the on-site technician uses mobile AR, allowing the identification of issues, while the remote expert uses a computer to share annotations and provide spatial information about objects, events and areas of interest. The results of a pilot user study to evaluate asynchronous collaborative aspects while using the approach are also presented.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an Augmented Reality (AR) remote collaborative approach making use of different stabilized annotation features, part of an ongoing research with partners from the industry. It enables a remote expert to assist an on-site technician during asynchronous maintenance tasks. To foster the creation of a shared understanding, the on-site technician uses mobile AR, allowing the identification of issues, while the remote expert uses a computer to share annotations and provide spatial information about objects, events and areas of interest. The results of a pilot user study to evaluate asynchronous collaborative aspects while using the approach are also presented.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an Augmented Reality (AR) remote collaborative approach making use of different stabilized annotation features, part of an ongoing research with partners from the industry. It enables a remote expert to assist an on-site technician during asynchronous maintenance tasks. To foster the creation of a shared understanding, the on-site technician uses mobile AR, allowing the identification of issues, while the remote expert uses a computer to share annotations and provide spatial information about objects, events and areas of interest. The results of a pilot user study to evaluate asynchronous collaborative aspects while using the approach are also presented.",
"fno": "405700a567",
"keywords": [
"Augmented Reality",
"Groupware",
"Asynchronous Collaboration",
"Maintenance Scenarios",
"Asynchronous Maintenance Tasks",
"Shared Understanding",
"Mobile AR",
"Asynchronous Collaborative Aspects",
"Stabilized Annotation Features",
"Augmented Reality Remote Collaborative Approach",
"Industries",
"Three Dimensional Displays",
"Annotations",
"Conferences",
"Collaboration",
"Maintenance Engineering",
"User Interfaces",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Collaborative Interaction",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "IEETA, DETI, University of Aveiro,Aveiro,Portugal",
"fullName": "Bernardo Marques",
"givenName": "Bernardo",
"surname": "Marques",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IEETA, DETI, University of Aveiro,Aveiro,Portugal",
"fullName": "Samuel Silva",
"givenName": "Samuel",
"surname": "Silva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bosch Thermotechnology,Aveiro,Portugal",
"fullName": "António Rocha",
"givenName": "António",
"surname": "Rocha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IEETA, DETI, University of Aveiro,Aveiro,Portugal",
"fullName": "Paulo Dias",
"givenName": "Paulo",
"surname": "Dias",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IEETA, DETI, University of Aveiro,Aveiro,Portugal",
"fullName": "Beatriz Sousa Santos",
"givenName": "Beatriz Sousa",
"surname": "Santos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "567-568",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnXhUiwGyY",
"name": "pvrw202140570-09419166s1-mm_405700a567.zip",
"size": "42.4 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419166s1-mm_405700a567.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a565",
"articleId": "1tnWDrGwhA4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a569",
"articleId": "1tnXxLHfCOQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550237",
"title": "Poster: 3D referencing for remote task assistance in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550237/12OmNqC2uWf",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836453",
"title": "Challenges for Asynchronous Collaboration in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836453/12OmNxaw5c0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdpsw/2018/5555/0/555501a405",
"title": "Unobtrusive Support for Asynchronous GUI Operations with Java Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2018/555501a405/12OmNyuPLa2",
"parentPublication": {
"id": "proceedings/ipdpsw/2018/5555/0",
"title": "2018 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699260",
"title": "Comparing Different Augmented Reality Support Applications for Cooperative Repair of an Industrial Robot",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699260/19F1M8A6RHO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a714",
"title": "Does Remote Expert Representation really matters: A comparison of Video and AR-based Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a714/1CJcBSlQWNa",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcrait/2022/8192/0/819200a373",
"title": "Research on remote guidance of hardware operation and maintenance of computer room based on AR",
"doi": null,
"abstractUrl": "/proceedings-article/gcrait/2022/819200a373/1HcngrngKC4",
"parentPublication": {
"id": "proceedings/gcrait/2022/8192/0",
"title": "2022 Global Conference on Robotics, Artificial Intelligence and Information Technology (GCRAIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a104",
"title": "Integrating AR and VR for Mobile Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a104/1gysoJbmNEI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a067",
"title": "Industrial Augmented Reality: Concepts and User Interface Designs for Augmented Reality Maintenance Worker Support Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a067/1pBMhXqBhCM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a392",
"title": "Magnoramas: Magnifying Dioramas for Precise Annotations in Asymmetric 3D Teleconsultation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a392/1tuB6zVYXUQ",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a441",
"title": "Dynamic Content Generation for Augmented Technical Support",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a441/1yeQDAAUmg8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKyu",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs59JEg",
"doi": "10.1109/ISMAR.2010.5643605",
"title": "Floyd-Warshall all-pair shortest path for accurate multi-marker calibration",
"normalizedTitle": "Floyd-Warshall all-pair shortest path for accurate multi-marker calibration",
"abstract": "We propose a novel method to compute the poses of randomly positioned square markers in one world coordinate frame from multiple camera views, by taking the predicted accuracy of the camera pose estimation for each marker into account. The problem of computing the best closed-form solution of the world pose of each marker is modeled as all-pair shortest path problem in graph theory. The computed world poses are further optimized by minimizing the geometric distances in images. Experimental results show that incorporating the predicted accuracy of the pose estimation for each marker yields constant high quality calibration results independent of the order of image sequences compared to cases when this knowledge is not used.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel method to compute the poses of randomly positioned square markers in one world coordinate frame from multiple camera views, by taking the predicted accuracy of the camera pose estimation for each marker into account. The problem of computing the best closed-form solution of the world pose of each marker is modeled as all-pair shortest path problem in graph theory. The computed world poses are further optimized by minimizing the geometric distances in images. Experimental results show that incorporating the predicted accuracy of the pose estimation for each marker yields constant high quality calibration results independent of the order of image sequences compared to cases when this knowledge is not used.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel method to compute the poses of randomly positioned square markers in one world coordinate frame from multiple camera views, by taking the predicted accuracy of the camera pose estimation for each marker into account. The problem of computing the best closed-form solution of the world pose of each marker is modeled as all-pair shortest path problem in graph theory. The computed world poses are further optimized by minimizing the geometric distances in images. Experimental results show that incorporating the predicted accuracy of the pose estimation for each marker yields constant high quality calibration results independent of the order of image sequences compared to cases when this knowledge is not used.",
"fno": "05643605",
"keywords": [
"Calibration",
"Graph Theory",
"Image Sequences",
"Pose Estimation",
"Floyd Warshall",
"Calibration",
"Square Markers",
"Pose Estimation",
"All Pair Shortest Path",
"Graph Theory",
"Image Sequences",
"Cameras",
"Calibration",
"Accuracy",
"Estimation",
"Closed Form Solution",
"Tracking",
"Image Sequences",
"Multi Marker Calibration",
"Visual Marker Based Tracking"
],
"authors": [
{
"affiliation": "Computer Aided Medical Procedures (CAMP), TU Munich, Germany",
"fullName": "Lejing Wang",
"givenName": "Lejing",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Aided Medical Procedures (CAMP), TU Munich, Germany",
"fullName": "Maximilian Springer",
"givenName": "Maximilian",
"surname": "Springer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Aided Medical Procedures (CAMP), TU Munich, Germany",
"fullName": "Hauke Heibel",
"givenName": "Hauke",
"surname": "Heibel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Aided Medical Procedures (CAMP), TU Munich, Germany",
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "277-278",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-9343-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05643604",
"articleId": "12OmNCd2rON",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05643606",
"articleId": "12OmNzV70zK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/1992/2910/0/00201621",
"title": "A camera calibration using four point-targets",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201621/12OmNA14AfD",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06238905",
"title": "Calibration for high-definition camera rigs with marker chessboard",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06238905/12OmNAXxWUh",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmsvm/2010/7077/0/05558353",
"title": "An Extended Marker-Based Tracking System for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/wmsvm/2010/05558353/12OmNAiFI7k",
"parentPublication": {
"id": "proceedings/wmsvm/2010/7077/0",
"title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2010/4286/1/4286a195",
"title": "An Improved Camera Calibration Method Using the Fiducial Marker System",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2010/4286a195/12OmNwCaCtd",
"parentPublication": {
"id": "proceedings/icdma/2010/4286/1",
"title": "2010 International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2002/1781/0/17810107",
"title": "Interactive Multi-Marker Calibration for Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810107/12OmNx4yvxu",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iai/1994/6250/0/00336666",
"title": "Head/eye calibration of a binocular head by use of single calibration point",
"doi": null,
"abstractUrl": "/proceedings-article/iai/1994/00336666/12OmNzTH0G2",
"parentPublication": {
"id": "proceedings/iai/1994/6250/0",
"title": "Proceedings of the IEEE Southwest Symposium on Image Analysis and Interpretation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2008/3304/6/3304f188",
"title": "Pose Determination of 3D Object Based on Four Straight Lines",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2008/3304f188/12OmNzXnNB6",
"parentPublication": {
"id": "proceedings/icnc/2008/3304/6",
"title": "2008 Fourth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a031",
"title": "Efficient Pose Selection for Interactive Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a031/17D45XH89n0",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300b497",
"title": "Calibration Wizard: A Guidance System for Camera Calibration Based on Modelling Geometric and Corner Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300b497/1hVlBp1zhpm",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600c497",
"title": "Sports Camera Calibration via Synthetic Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600c497/1iTvqBvCX84",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwekjuE",
"title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)",
"acronym": "cgiv",
"groupId": "1001775",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzR8CxW",
"doi": "10.1109/CGIV.2005.32",
"title": "Disturbance-Rejecting Method for Cooperative Object Pose Estimation from Binocular Images",
"normalizedTitle": "Disturbance-Rejecting Method for Cooperative Object Pose Estimation from Binocular Images",
"abstract": "A disturbance-rejecting method for measuring a cooperative object's pose from binocular images is presented. The presented method optimizes the parameters of the object's pose and modifies some measure system parameters simultaneously by bundle adjustment based on initial values. Experimental data show that the method converges quickly and stably, gives accurate results and does not demand accurate initial values. Especially, when the measure system parameters are disturbed, this method still gives accurate results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A disturbance-rejecting method for measuring a cooperative object's pose from binocular images is presented. The presented method optimizes the parameters of the object's pose and modifies some measure system parameters simultaneously by bundle adjustment based on initial values. Experimental data show that the method converges quickly and stably, gives accurate results and does not demand accurate initial values. Especially, when the measure system parameters are disturbed, this method still gives accurate results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A disturbance-rejecting method for measuring a cooperative object's pose from binocular images is presented. The presented method optimizes the parameters of the object's pose and modifies some measure system parameters simultaneously by bundle adjustment based on initial values. Experimental data show that the method converges quickly and stably, gives accurate results and does not demand accurate initial values. Especially, when the measure system parameters are disturbed, this method still gives accurate results.",
"fno": "23920127",
"keywords": [
"Disturbance Rejecting",
"Binocular",
"Cooperative Object Pose",
"Bundle Adjustment"
],
"authors": [
{
"affiliation": "National University of Defence Technology - China",
"fullName": "Yang Shang",
"givenName": "Yang",
"surname": "Shang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Defence Technology - China",
"fullName": "Qifeng Yu",
"givenName": "Qifeng",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Defence Technology - China",
"fullName": "Zhihui Lei",
"givenName": "Zhihui",
"surname": "Lei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Defence Technology - China",
"fullName": "Lichun Li",
"givenName": "Lichun",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgiv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-07-01T00:00:00",
"pubType": "proceedings",
"pages": "127-130",
"year": "2005",
"issn": null,
"isbn": "0-7695-2392-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "23920123",
"articleId": "12OmNroijdz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "23920133",
"articleId": "12OmNya72pw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2001/1143/2/114320644",
"title": "Model-Based Bundle Adjustment with Application to Face Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/114320644/12OmNC4eSlP",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2011/4500/1/4500a306",
"title": "Research on Rejecting Load Disturbance for Levitation System of Maglev Train",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2011/4500a306/12OmNxEjXUM",
"parentPublication": {
"id": "proceedings/iscid/2011/4500/1",
"title": "Computational Intelligence and Design, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2009/3641/0/3641b063",
"title": "A 5-Parameter Bundle Adjustment Method for Image Mosaic",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2009/3641b063/12OmNyuy9Px",
"parentPublication": {
"id": "proceedings/icis/2009/3641/0",
"title": "Computer and Information Science, ACIS International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a290",
"title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a575",
"title": "Monocular Depth from Small Motion Video Accelerated",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a575/12OmNzuZUym",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b876",
"title": "pOSE: Pseudo Object Space Error for Initialization-Free Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b876/17D45VVho3g",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699178",
"title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2756",
"title": "Relative Pose from a Calibrated and an Uncalibrated Smartphone Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2756/1H1kIJH9gNq",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a308",
"title": "Motion Capture from Pan-Tilt Cameras with Unknown Orientation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a308/1ezRBTghOZq",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900o4541",
"title": "Efficient Initial Pose-graph Generation for Global SfM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900o4541/1yeKxey0QLK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0L28aGKFa",
"doi": "10.1109/CVPR52688.2022.00879",
"title": "Scanline Homographies for Rolling-Shutter Plane Absolute Pose",
"normalizedTitle": "Scanline Homographies for Rolling-Shutter Plane Absolute Pose",
"abstract": "Cameras on portable devices are manufactured with a rolling-shutter (RS) mechanism, where the image rows (aka. scanlines) are read out sequentially. The unknown camera motions during the imaging process cause the so-called RS effects which are solved by motion assumptions in the literature. In this work, we give a solution to the absolute pose problem free of motion assumptions. We categorically demonstrate that the only requirement is motion smoothness instead of stronger constraints on the camera motion. To this end, we propose a novel mathematical abstraction for RS cameras observing a planar scene, called the scanline-homography, a 3 × 2 matrix with 5 DOFs. We establish the relationship between a scanline-homography and the corresponding plane-homography, a 3 × 3 matrix with 6 DOFs assuming the camera is calibrated. We estimate the scanline-homographies of an RS frame using a smooth image warp powered by B-Splines, and recover the plane-homographies afterwards to obtain the scanline-poses based on motion smoothness. We back our claims with various experiments. Code and new datasets: https://bitbucket.org/clermontferrand/planarscanlinehomography/src/master/.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cameras on portable devices are manufactured with a rolling-shutter (RS) mechanism, where the image rows (aka. scanlines) are read out sequentially. The unknown camera motions during the imaging process cause the so-called RS effects which are solved by motion assumptions in the literature. In this work, we give a solution to the absolute pose problem free of motion assumptions. We categorically demonstrate that the only requirement is motion smoothness instead of stronger constraints on the camera motion. To this end, we propose a novel mathematical abstraction for RS cameras observing a planar scene, called the scanline-homography, a 3 × 2 matrix with 5 DOFs. We establish the relationship between a scanline-homography and the corresponding plane-homography, a 3 × 3 matrix with 6 DOFs assuming the camera is calibrated. We estimate the scanline-homographies of an RS frame using a smooth image warp powered by B-Splines, and recover the plane-homographies afterwards to obtain the scanline-poses based on motion smoothness. We back our claims with various experiments. Code and new datasets: https://bitbucket.org/clermontferrand/planarscanlinehomography/src/master/.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cameras on portable devices are manufactured with a rolling-shutter (RS) mechanism, where the image rows (aka. scanlines) are read out sequentially. The unknown camera motions during the imaging process cause the so-called RS effects which are solved by motion assumptions in the literature. In this work, we give a solution to the absolute pose problem free of motion assumptions. We categorically demonstrate that the only requirement is motion smoothness instead of stronger constraints on the camera motion. To this end, we propose a novel mathematical abstraction for RS cameras observing a planar scene, called the scanline-homography, a 3 × 2 matrix with 5 DOFs. We establish the relationship between a scanline-homography and the corresponding plane-homography, a 3 × 3 matrix with 6 DOFs assuming the camera is calibrated. We estimate the scanline-homographies of an RS frame using a smooth image warp powered by B-Splines, and recover the plane-homographies afterwards to obtain the scanline-poses based on motion smoothness. We back our claims with various experiments. Code and new datasets: https://bitbucket.org/clermontferrand/planarscanlinehomography/src/master/.",
"fno": "694600i983",
"keywords": [
"Calibration",
"Matrix Algebra",
"Pose Estimation",
"Splines Mathematics",
"Rolling Shutter Plane Absolute",
"Rolling Shutter Mechanism",
"Image Rows",
"Motion Smoothness",
"Camera Motion",
"RS Cameras",
"Scanline Homography",
"RS Frame",
"Smooth Image Warp",
"Scanline Poses",
"Plane Homography",
"Photography",
"Geometry",
"Computer Vision",
"Transmission Line Matrix Methods",
"Codes",
"Pose Estimation",
"Cameras"
],
"authors": [
{
"affiliation": "Institut Pascal, UMR6602 CNRS, Université Clermont Auvergne,ENCOV, TGI,France",
"fullName": "Fang Bai",
"givenName": "Fang",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institut Pascal, UMR6602 CNRS, Université Clermont Auvergne,ENCOV, TGI,France",
"fullName": "Agniva Sengupta",
"givenName": "Agniva",
"surname": "Sengupta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institut Pascal, UMR6602 CNRS, Université Clermont Auvergne,ENCOV, TGI,France",
"fullName": "Adrien Bartoli",
"givenName": "Adrien",
"surname": "Bartoli",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "8983-8992",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0L236bcxG",
"name": "pcvpr202269460-09878814s1-mm_694600i983.zip",
"size": "19.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878814s1-mm_694600i983.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600i973",
"articleId": "1H0NVOizYPK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600i993",
"articleId": "1H1mTGwW6di",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2015/9711/0/5720a041",
"title": "HDR Recovery Under Rolling Shutter Distortions",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a041/12OmNAjO6Em",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298760",
"title": "Rolling shutter motion deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298760/12OmNAkWvdx",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/181P2A31",
"title": "Rolling shutter bundle adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a558",
"title": "Rolling Shutter Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a558/12OmNB0nWaN",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a948",
"title": "Rolling-Shutter-Aware Differential SfM and Image Rectification",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a948/12OmNC2OSNC",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d355",
"title": "Rolling Shutter Absolute Pose Problem with Known Vertical Direction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d355/12OmNs0kyFo",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/2.812E213",
"title": "Inverting a Rolling Shutter Camera: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/2.812E213/1BmHTvnAJvq",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09926197",
"title": "Rolling Shutter Inversion: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09926197/1HGJ3Pb5VzW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300e546",
"title": "Learning Structure-And-Motion-Aware Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300e546/1gyr9GVnqOA",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/08/09020067",
"title": "Rolling Shutter Homography and its Applications",
"doi": null,
"abstractUrl": "/journal/tp/2021/08/09020067/1hS2LoZJKO4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1mTOhuZqw",
"doi": "10.1109/CVPR52688.2022.01079",
"title": "End-to-End Multi-Person Pose Estimation with Transformers",
"normalizedTitle": "End-to-End Multi-Person Pose Estimation with Transformers",
"abstract": "Current methods of multi-person pose estimation typically treat the localization and association of body joints separately. In this paper, we propose the first fully end-to-end multi-person Pose Estimation framework with TRansformers, termed PETR. Our method views pose estimation as a hierarchical set prediction problem and effectively removes the need for many hand-crafted modules like RoI cropping, NMS and grouping post-processing. In PETR, multiple pose queries are learned to directly reason a set of full-body poses. Then a joint decoder is utilized to further refine the poses by exploring the kinematic relations between body joints. With the attention mechanism, the proposed method is able to adaptively attend to the features most relevant to target keypoints, which largely overcomes the feature misalignment difficulty in pose estimation and improves the performance considerably. Extensive experiments on the MS COCO and CrowdPose benchmarks show that PETR plays favorably against state-of-the-art approaches in terms of both accuracy and efficiency. The code and models are available at https://github.com/hikvision-research/opera.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Current methods of multi-person pose estimation typically treat the localization and association of body joints separately. In this paper, we propose the first fully end-to-end multi-person Pose Estimation framework with TRansformers, termed PETR. Our method views pose estimation as a hierarchical set prediction problem and effectively removes the need for many hand-crafted modules like RoI cropping, NMS and grouping post-processing. In PETR, multiple pose queries are learned to directly reason a set of full-body poses. Then a joint decoder is utilized to further refine the poses by exploring the kinematic relations between body joints. With the attention mechanism, the proposed method is able to adaptively attend to the features most relevant to target keypoints, which largely overcomes the feature misalignment difficulty in pose estimation and improves the performance considerably. Extensive experiments on the MS COCO and CrowdPose benchmarks show that PETR plays favorably against state-of-the-art approaches in terms of both accuracy and efficiency. The code and models are available at https://github.com/hikvision-research/opera.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Current methods of multi-person pose estimation typically treat the localization and association of body joints separately. In this paper, we propose the first fully end-to-end multi-person Pose Estimation framework with TRansformers, termed PETR. Our method views pose estimation as a hierarchical set prediction problem and effectively removes the need for many hand-crafted modules like RoI cropping, NMS and grouping post-processing. In PETR, multiple pose queries are learned to directly reason a set of full-body poses. Then a joint decoder is utilized to further refine the poses by exploring the kinematic relations between body joints. With the attention mechanism, the proposed method is able to adaptively attend to the features most relevant to target keypoints, which largely overcomes the feature misalignment difficulty in pose estimation and improves the performance considerably. Extensive experiments on the MS COCO and CrowdPose benchmarks show that PETR plays favorably against state-of-the-art approaches in terms of both accuracy and efficiency. The code and models are available at https://github.com/hikvision-research/opera.",
"fno": "694600l1059",
"keywords": [
"Deep Learning Artificial Intelligence",
"Pose Estimation",
"Prediction Theory",
"Body Joints",
"Hierarchical Set Prediction Problem",
"PETR",
"Full Body Poses",
"End To End Multiperson Pose Estimation Framework With Transformers",
"Ro I Cropping",
"NMS",
"Grouping Post Processing",
"Joint Decoder",
"Kinematic Relations",
"MS COCO",
"Crowd Pose Benchmarks",
"Location Awareness",
"Computer Vision",
"Image Analysis",
"Codes",
"Pose Estimation",
"Kinematics",
"Benchmark Testing",
"Pose Estimation And Tracking Recognition Detection",
"Categorization",
"Retrieval Scene Analysis And Understanding"
],
"authors": [
{
"affiliation": "Hikvision Research Institute,Hangzhou,China",
"fullName": "Dahu Shi",
"givenName": "Dahu",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Software Engineering, Xi'an Jiaotong University",
"fullName": "Xing Wei",
"givenName": "Xing",
"surname": "Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hikvision Research Institute,Hangzhou,China",
"fullName": "Liangqi Li",
"givenName": "Liangqi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hikvision Research Institute,Hangzhou,China",
"fullName": "Ye Ren",
"givenName": "Ye",
"surname": "Ren",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hikvision Research Institute,Hangzhou,China",
"fullName": "Wenming Tan",
"givenName": "Wenming",
"surname": "Tan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "11059-11068",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "694600l1050",
"articleId": "1H0Opp1rB0k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600l1069",
"articleId": "1H1j3xF8CRy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f563",
"title": "Thin-Slicing Network: A Deep Structured Model for Pose Estimation in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f563/12OmNvnOwv5",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e938",
"title": "Thin-Slicing for Pose: Learning to Understand Pose without Explicit Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e938/12OmNzVoBAb",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09667045",
"title": "PoseDet: Fast Multi-Person Pose Estimation Using Pose Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667045/1A6BmXvShR6",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1128",
"title": "Graph-Based 3D Multi-Person Pose Estimation Using Multi-View Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1128/1BmGiyyhfsA",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1169",
"title": "End-to-End Detection and Pose Estimation of Two Interacting Hands",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1169/1BmGmZoOl7G",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956366",
"title": "Jointformer: Single-Frame Lifting Transformer with Error Prediction and Refinement for 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956366/1IHpyM7DQFa",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09954214",
"title": "AlphaPose: Whole-Body Regional Multi-Person Pose Estimation and Tracking in Real-Time",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09954214/1InorY9vGXS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g950",
"title": "Single-Stage Multi-Person Pose Machines",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g950/1hQqs9ApVAc",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413234",
"title": "Can You Trust Your Pose? Confidence Estimation in Visual Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413234/1tmiIBwpcWI",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h645",
"title": "Monocular 3D Multi-Person Pose Estimation by Integrating Top-Down and Bottom-Up Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h645/1yeKL9bgJXO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1IHotVZum6Q",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "9956007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IHp8nwvqow",
"doi": "10.1109/ICPR56361.2022.9956135",
"title": "Fast and efficient minimal solvers for quadric based camera pose estimation",
"normalizedTitle": "Fast and efficient minimal solvers for quadric based camera pose estimation",
"abstract": "In this paper we address absolute camera pose estimation. An efficient (and standard) way to solve this problem, is to use sparse keypoint correspondences. In many cases point features are not available, or are unstable over time and viewing conditions. We propose a framework based on silhouettes of quadric surfaces, with special emphasis on cylinders. We provide mathematical analysis of the problem of projected cylinders in particular, but also general quadrics. We develop a number of minimal solvers for estimating camera pose from silhouette lines of cylinders, given different calibration and cylinder properties. These solvers can be used efficiently in bootstrapping robust estimation schemes, such as RANSAC. Note that even though we have lines as image features, this is a different case than line based pose estimation, since we do not have 2D-line to 3D-line correspondences. We perform synthetic accuracy and robustness tests and evaluate on a number of real case scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we address absolute camera pose estimation. An efficient (and standard) way to solve this problem, is to use sparse keypoint correspondences. In many cases point features are not available, or are unstable over time and viewing conditions. We propose a framework based on silhouettes of quadric surfaces, with special emphasis on cylinders. We provide mathematical analysis of the problem of projected cylinders in particular, but also general quadrics. We develop a number of minimal solvers for estimating camera pose from silhouette lines of cylinders, given different calibration and cylinder properties. These solvers can be used efficiently in bootstrapping robust estimation schemes, such as RANSAC. Note that even though we have lines as image features, this is a different case than line based pose estimation, since we do not have 2D-line to 3D-line correspondences. We perform synthetic accuracy and robustness tests and evaluate on a number of real case scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we address absolute camera pose estimation. An efficient (and standard) way to solve this problem, is to use sparse keypoint correspondences. In many cases point features are not available, or are unstable over time and viewing conditions. We propose a framework based on silhouettes of quadric surfaces, with special emphasis on cylinders. We provide mathematical analysis of the problem of projected cylinders in particular, but also general quadrics. We develop a number of minimal solvers for estimating camera pose from silhouette lines of cylinders, given different calibration and cylinder properties. These solvers can be used efficiently in bootstrapping robust estimation schemes, such as RANSAC. Note that even though we have lines as image features, this is a different case than line based pose estimation, since we do not have 2D-line to 3D-line correspondences. We perform synthetic accuracy and robustness tests and evaluate on a number of real case scenarios.",
"fno": "09956135",
"keywords": [
"Calibration",
"Cameras",
"Computational Geometry",
"Computer Vision",
"Estimation Theory",
"Image Reconstruction",
"Mathematical Analysis",
"Pose Estimation",
"3 D Line Correspondences",
"Absolute Camera",
"Case Scenarios",
"Cylinder Properties",
"Different Case",
"Efficient Minimal Solvers",
"Fast Solvers",
"General Quadrics",
"Given Different Calibration",
"Image Features",
"Mathematical Analysis",
"Projected Cylinders",
"Quadric Based Camera",
"Quadric Surfaces",
"Robust Estimation Schemes",
"Silhouette Lines",
"Silhouettes",
"Sparse Keypoint Correspondences",
"Viewing Conditions",
"Image Edge Detection",
"Pose Estimation",
"Mathematical Analysis",
"Cameras",
"Robustness",
"Pattern Recognition",
"Calibration"
],
"authors": [
{
"affiliation": "Lund University,Centre for Mathematical Sciences,Sweden",
"fullName": "Anna Gummeson",
"givenName": "Anna",
"surname": "Gummeson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lund University,Centre for Mathematical Sciences,Sweden",
"fullName": "Johanna Engman",
"givenName": "Johanna",
"surname": "Engman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lund University,Centre for Mathematical Sciences,Sweden",
"fullName": "Kalle Åström",
"givenName": "Kalle",
"surname": "Åström",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lund University,Centre for Mathematical Sciences,Sweden",
"fullName": "Magnus Oskarsson",
"givenName": "Magnus",
"surname": "Oskarsson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "3973-3979",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9062-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09956473",
"articleId": "1IHpILTVz8Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956365",
"articleId": "1IHp3Rccu2c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032c335",
"title": "Making Minimal Solvers for Absolute Pose Estimation Compact and Robust",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c335/12OmNAL3B8G",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118a033",
"title": "Minimal Solvers for Relative Pose with a Single Unknown Radial Distortion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a033/12OmNrFkeSu",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/02/08388302",
"title": "Globally-Optimal Inlier Set Maximisation for Camera Pose and Correspondence Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2020/02/08388302/13rRUx0gegI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a136",
"title": "Hybrid Camera Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a136/17D45WXIkI4",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c984",
"title": "Camera Pose Estimation with Unknown Principal Point",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c984/17D45XERmmu",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956493",
"title": "Relative Pose Solvers using Monocular Depth",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956493/1IHpH9oN7MI",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/10/09057738",
"title": "Camera Pose Estimation Using First-Order Curve Differential Geometry",
"doi": null,
"abstractUrl": "/journal/tp/2021/10/09057738/1iO4519gFK8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412279",
"title": "Minimal Solvers for Indoor UAV Positioning",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412279/1tmj7NkeRdm",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3129",
"title": "Wide-Baseline Multi-Camera Calibration using Person Re-Identification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3129/1yeJOtC0sjS",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900e657",
"title": "Uncertainty-Aware Camera Pose Estimation from Points and Lines",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900e657/1yeLA5aFzlC",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgDQDEgX6",
"doi": "10.1109/VR55154.2023.00041",
"title": "Simultaneous Scene-independent Camera Localization and Category-level Object Pose Estimation via Multi-level Feature Fusion",
"normalizedTitle": "Simultaneous Scene-independent Camera Localization and Category-level Object Pose Estimation via Multi-level Feature Fusion",
"abstract": "In AR/MR applications, camera localization and object pose estimation both play crucial roles. The universality of learning techniques, often referred to as scene-independent localization and category-level pose estimation, presents challenges for both tasks. The two missions maintain close relationships due to the spatial geometry constraint, but differing task requirements result in distinct feature extraction. In this paper, we focus on simultaneous scene-independent camera localization and category-level object pose estimation with a unified learning framework. The system consists of a localization branch called SLO-LocNet, a pose estimation branch called SLO-ObjNet, a feature fusion module for feature sharing between two tasks, and two decoders for creating coordinate maps. In SLO-LocNet, localization features are produced for anticipating the relative pose between two adjusted frames using inputs of color and depth images. Furthermore, we establish an image fusion module in order to promote feature sharing in depth and color branches. With SLO-ObjNet, we take the detected depth image and its corresponding point cloud as inputs, and produce object pose features for pose estimation. A geometry fusion module is created to combine depth and point cloud information simultaneously. Between the two tasks, the image fusion module is also exploited to accomplish feature sharing. In terms of the loss function, we present a mixed optimization function that is composed of the relative camera pose, geometry constraint, absolute and relative object pose terms. To verify how well our algorithm could perform, we conduct experiments on both localization and pose estimation datasets, covering 7 Scenes, ScanNet, REAL275 and YCB-Video. All experiments demonstrate superior performance to other existing methods. We specifically train the network on ScanNet and test it on 7 Scenes to demonstrate the universality performance. 
Additionally, the positive effects of fusion modules and loss function are also demonstrated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In AR/MR applications, camera localization and object pose estimation both play crucial roles. The universality of learning techniques, often referred to as scene-independent localization and category-level pose estimation, presents challenges for both tasks. The two missions maintain close relationships due to the spatial geometry constraint, but differing task requirements result in distinct feature extraction. In this paper, we focus on simultaneous scene-independent camera localization and category-level object pose estimation with a unified learning framework. The system consists of a localization branch called SLO-LocNet, a pose estimation branch called SLO-ObjNet, a feature fusion module for feature sharing between two tasks, and two decoders for creating coordinate maps. In SLO-LocNet, localization features are produced for anticipating the relative pose between two adjusted frames using inputs of color and depth images. Furthermore, we establish an image fusion module in order to promote feature sharing in depth and color branches. With SLO-ObjNet, we take the detected depth image and its corresponding point cloud as inputs, and produce object pose features for pose estimation. A geometry fusion module is created to combine depth and point cloud information simultaneously. Between the two tasks, the image fusion module is also exploited to accomplish feature sharing. In terms of the loss function, we present a mixed optimization function that is composed of the relative camera pose, geometry constraint, absolute and relative object pose terms. To verify how well our algorithm could perform, we conduct experiments on both localization and pose estimation datasets, covering 7 Scenes, ScanNet, REAL275 and YCB-Video. All experiments demonstrate superior performance to other existing methods. We specifically train the network on ScanNet and test it on 7 Scenes to demonstrate the universality performance. 
Additionally, the positive effects of fusion modules and loss function are also demonstrated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In AR/MR applications, camera localization and object pose estimation both play crucial roles. The universality of learning techniques, often referred to as scene-independent localization and category-level pose estimation, presents challenges for both tasks. The two missions maintain close relationships due to the spatial geometry constraint, but differing task requirements result in distinct feature extraction. In this paper, we focus on simultaneous scene-independent camera localization and category-level object pose estimation with a unified learning framework. The system consists of a localization branch called SLO-LocNet, a pose estimation branch called SLO-ObjNet, a feature fusion module for feature sharing between two tasks, and two decoders for creating coordinate maps. In SLO-LocNet, localization features are produced for anticipating the relative pose between two adjusted frames using inputs of color and depth images. Furthermore, we establish an image fusion module in order to promote feature sharing in depth and color branches. With SLO-ObjNet, we take the detected depth image and its corresponding point cloud as inputs, and produce object pose features for pose estimation. A geometry fusion module is created to combine depth and point cloud information simultaneously. Between the two tasks, the image fusion module is also exploited to accomplish feature sharing. In terms of the loss function, we present a mixed optimization function that is composed of the relative camera pose, geometry constraint, absolute and relative object pose terms. To verify how well our algorithm could perform, we conduct experiments on both localization and pose estimation datasets, covering 7 Scenes, ScanNet, REAL275 and YCB-Video. All experiments demonstrate superior performance to other existing methods. We specifically train the network on ScanNet and test it on 7 Scenes to demonstrate the universality performance. 
Additionally, the positive effects of fusion modules and loss function are also demonstrated.",
"fno": "481500a254",
"keywords": [
"Location Awareness",
"Point Cloud Compression",
"Geometry",
"Image Color Analysis",
"Pose Estimation",
"Cameras",
"Feature Extraction",
"Scene Independent Camera Localization",
"Cagetory Level Object Pose Estimation",
"Feature Fusion",
"Multi Task Learning",
"Geometry Constraint"
],
"authors": [
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems",
"fullName": "Junyi Wang",
"givenName": "Junyi",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems",
"fullName": "Yue Qi",
"givenName": "Yue",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "254-264",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgDN9lVPW",
"name": "pvr202348150-010108437s1-mm_481500a254.zip",
"size": "2.23 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108437s1-mm_481500a254.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500a243",
"articleId": "1MNgyZ3pLFe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a265",
"articleId": "1MNgyv4KlWM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2021/3176/0/09667045",
"title": "PoseDet: Fast Multi-Person Pose Estimation Using Pose Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667045/1A6BmXvShR6",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c753",
"title": "SGPA: Structure-Guided Prior Adaptation for Category-Level 6D Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c753/1BmLbSYdSX6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600o4871",
"title": "UDA-COPE: Unsupervised Domain Adaptation for Category-level Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600o4871/1H0OgAYk7du",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g771",
"title": "GPV-Pose: Category-level Object Pose Estimation via Geometry-guided Point-wise Voting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g771/1H1hx09yd2g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1059",
"title": "End-to-End Multi-Person Pose Estimation with Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1059/1H1mTOhuZqw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f674",
"title": "SD-Pose: Structural Discrepancy Aware Category-Level 6D Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f674/1KxVGu336Du",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a253",
"title": "Deep Entwined Learning Head Pose and Face Alignment Inside an Attentional Cascade with Doubly-Conditional fusion",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a253/1kecI4Z0oSY",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413234",
"title": "Can You Trust Your Pose? Confidence Estimation in Visual Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413234/1tmiIBwpcWI",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413201",
"title": "PA-FlowNet: Pose-Auxiliary Optical Flow Network for Spacecraft Relative Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413201/1tmjQdBv8M8",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d002",
"title": "FFB6D: A Full Flow Bidirectional Fusion Network for 6D Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d002/1yeJomBIuVa",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tmhi3ly74c",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tmiIBwpcWI",
"doi": "10.1109/ICPR48806.2021.9413234",
"title": "Can You Trust Your Pose? Confidence Estimation in Visual Localization",
"normalizedTitle": "Can You Trust Your Pose? Confidence Estimation in Visual Localization",
"abstract": "Camera pose estimation in large-scale environments is still an open question and, despite recent promising results, it may still fail in some situations. The research so far has focused on improving subcomponents of estimation pipelines, to achieve more accurate poses. However, there is no guarantee for the result to be correct, even though the correctness of pose estimation is critically important in several visual localization applications, such as in autonomous navigation. In this paper we bring to attention a novel research question, pose confidence estimation, where we aim at quantifying how reliable the visually estimated pose is. We develop a novel confidence measure to fulfill this task and show that it can be flexibly applied to different datasets, indoor or outdoor, and for various visual localization pipelines. We also show that the proposed techniques can be used to accomplish a secondary goal: improving the accuracy of existing pose estimation pipelines. Finally, the proposed approach is computationally light-weight and adds only a negligible increase to the computational effort of pose estimation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Camera pose estimation in large-scale environments is still an open question and, despite recent promising results, it may still fail in some situations. The research so far has focused on improving subcomponents of estimation pipelines, to achieve more accurate poses. However, there is no guarantee for the result to be correct, even though the correctness of pose estimation is critically important in several visual localization applications, such as in autonomous navigation. In this paper we bring to attention a novel research question, pose confidence estimation, where we aim at quantifying how reliable the visually estimated pose is. We develop a novel confidence measure to fulfill this task and show that it can be flexibly applied to different datasets, indoor or outdoor, and for various visual localization pipelines. We also show that the proposed techniques can be used to accomplish a secondary goal: improving the accuracy of existing pose estimation pipelines. Finally, the proposed approach is computationally light-weight and adds only a negligible increase to the computational effort of pose estimation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Camera pose estimation in large-scale environments is still an open question and, despite recent promising results, it may still fail in some situations. The research so far has focused on improving subcomponents of estimation pipelines, to achieve more accurate poses. However, there is no guarantee for the result to be correct, even though the correctness of pose estimation is critically important in several visual localization applications, such as in autonomous navigation. In this paper we bring to attention a novel research question, pose confidence estimation, where we aim at quantifying how reliable the visually estimated pose is. We develop a novel confidence measure to fulfill this task and show that it can be flexibly applied to different datasets, indoor or outdoor, and for various visual localization pipelines. We also show that the proposed techniques can be used to accomplish a secondary goal: improving the accuracy of existing pose estimation pipelines. Finally, the proposed approach is computationally light-weight and adds only a negligible increase to the computational effort of pose estimation.",
"fno": "09413234",
"keywords": [
"Cameras",
"Pose Estimation",
"Camera Pose Estimation",
"Visual Localization Pipelines",
"Confidence Measure",
"Pose Confidence Estimation",
"Visual Localization Applications",
"Estimation Pipelines",
"Large Scale Environments",
"Location Awareness",
"Training",
"Measurement",
"Visualization",
"Uncertainty",
"Pose Estimation",
"Pipelines"
],
"authors": [
{
"affiliation": "University of Vaasa,Vaasa,Finland",
"fullName": "Luca Ferranti",
"givenName": "Luca",
"surname": "Ferranti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalto University,Espoo,Finland",
"fullName": "Xiaotian Li",
"givenName": "Xiaotian",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Vaasa,Vaasa,Finland",
"fullName": "Jani Boutellier",
"givenName": "Jani",
"surname": "Boutellier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalto University,Espoo,Finland",
"fullName": "Juho Kannala",
"givenName": "Juho",
"surname": "Kannala",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "5004-5011",
"year": "2021",
"issn": "1051-4651",
"isbn": "978-1-7281-8808-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09412030",
"articleId": "1tmikzvnSdG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09412711",
"articleId": "1tmiS3wZE0E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034c192",
"title": "Mutual Hypothesis Verification for 6D Pose Estimation of Natural Objects",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c192/12OmNAhfIwp",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09667045",
"title": "PoseDet: Fast Multi-Person Pose Estimation Using Pose Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667045/1A6BmXvShR6",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5954",
"title": "Pose Correction for Highly Accurate Visual Localization in Large-scale Indoor Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5954/1BmIcjHn0GY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1059",
"title": "End-to-End Multi-Person Pose Estimation with Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1059/1H1mTOhuZqw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09954214",
"title": "AlphaPose: Whole-Body Regional Multi-Person Pose Estimation and Tracking in Real-Time",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09954214/1InorY9vGXS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a254",
"title": "Simultaneous Scene-independent Camera Localization and Category-level Object Pose Estimation via Multi-level Feature Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a254/1MNgDQDEgX6",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c758",
"title": "CullNet: Calibrated and Pose Aware Confidence Scores for Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c758/1i5mMsq7oYM",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a253",
"title": "Deep Entwined Learning Head Pose and Face Alignment Inside an Attentional Cascade with Doubly-Conditional fusion",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a253/1kecI4Z0oSY",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413201",
"title": "PA-FlowNet: Pose-Auxiliary Optical Flow Network for Spacecraft Relative Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413201/1tmjQdBv8M8",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d246",
"title": "Back to the Feature: Learning Robust Camera Localization from Pixels to Pose",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d246/1yeJfToH5e0",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tmhi3ly74c",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tmjQdBv8M8",
"doi": "10.1109/ICPR48806.2021.9413201",
"title": "PA-FlowNet: Pose-Auxiliary Optical Flow Network for Spacecraft Relative Pose Estimation",
"normalizedTitle": "PA-FlowNet: Pose-Auxiliary Optical Flow Network for Spacecraft Relative Pose Estimation",
"abstract": "During the process of space travelling and space landing, the spacecraft attitude estimation is the indispensable work for navigation. Since there are not enough satellites for GPS-like localization in space, the computer vision technique is adopted to address the issue. The most crucial task for localization is the extraction of correspondences. In computer vision, optical flow estimation is often used for finding correspondences between images. As the deep neural network being more popular in recent years, FlowNet2 has played a vital role which achieves great success. In this paper, we present PA-FlowNet, an end-to-end pose-auxiliary optical flow network which can use the predicted relative camera pose to improve the performance of optical flow. PA-FlowNet is composed of two sub-networks, the foreground-attention flow network and the pose regression network. The foreground-attention flow network is constructed by FlowNet2 model and modified with the proposed foreground-attention approach. We introduced this approach with the concept of curriculum learning for foreground-background segmentation to avoid backgrounds from resulting in flow prediction error. The pose regression network is used to regress the relative camera pose as an auxiliary for increasing the accuracy of the flow estimation. In addition, to simulate the test environment for spacecraft pose estimation, we construct a 64K moon model and simulate aerial photography with various attitudes to generate Moon64K dataset in this paper. PA-FlowNet significantly outperforms all existing methods on the proposed Moon64K dataset. Furthermore, we also predict the relative camera pose via proposed PA-FlowNet and accomplish the remarkable performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "During the process of space travelling and space landing, the spacecraft attitude estimation is the indispensable work for navigation. Since there are not enough satellites for GPS-like localization in space, the computer vision technique is adopted to address the issue. The most crucial task for localization is the extraction of correspondences. In computer vision, optical flow estimation is often used for finding correspondences between images. As the deep neural network being more popular in recent years, FlowNet2 has played a vital role which achieves great success. In this paper, we present PA-FlowNet, an end-to-end pose-auxiliary optical flow network which can use the predicted relative camera pose to improve the performance of optical flow. PA-FlowNet is composed of two sub-networks, the foreground-attention flow network and the pose regression network. The foreground-attention flow network is constructed by FlowNet2 model and modified with the proposed foreground-attention approach. We introduced this approach with the concept of curriculum learning for foreground-background segmentation to avoid backgrounds from resulting in flow prediction error. The pose regression network is used to regress the relative camera pose as an auxiliary for increasing the accuracy of the flow estimation. In addition, to simulate the test environment for spacecraft pose estimation, we construct a 64K moon model and simulate aerial photography with various attitudes to generate Moon64K dataset in this paper. PA-FlowNet significantly outperforms all existing methods on the proposed Moon64K dataset. Furthermore, we also predict the relative camera pose via proposed PA-FlowNet and accomplish the remarkable performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "During the process of space travelling and space landing, the spacecraft attitude estimation is the indispensable work for navigation. Since there are not enough satellites for GPS-like localization in space, the computer vision technique is adopted to address the issue. The most crucial task for localization is the extraction of correspondences. In computer vision, optical flow estimation is often used for finding correspondences between images. As the deep neural network being more popular in recent years, FlowNet2 has played a vital role which achieves great success. In this paper, we present PA-FlowNet, an end-to-end pose-auxiliary optical flow network which can use the predicted relative camera pose to improve the performance of optical flow. PA-FlowNet is composed of two sub-networks, the foreground-attention flow network and the pose regression network. The foreground-attention flow network is constructed by FlowNet2 model and modified with the proposed foreground-attention approach. We introduced this approach with the concept of curriculum learning for foreground-background segmentation to avoid backgrounds from resulting in flow prediction error. The pose regression network is used to regress the relative camera pose as an auxiliary for increasing the accuracy of the flow estimation. In addition, to simulate the test environment for spacecraft pose estimation, we construct a 64K moon model and simulate aerial photography with various attitudes to generate Moon64K dataset in this paper. PA-FlowNet significantly outperforms all existing methods on the proposed Moon64K dataset. Furthermore, we also predict the relative camera pose via proposed PA-FlowNet and accomplish the remarkable performance.",
"fno": "09413201",
"keywords": [
"Cameras",
"Computer Vision",
"Image Segmentation",
"Image Sensors",
"Image Sequences",
"Learning Artificial Intelligence",
"Neural Nets",
"Object Detection",
"Pose Estimation",
"Regression Analysis",
"Space Vehicles",
"End To End Pose Auxiliary Optical Flow Network",
"Predicted Relative Camera",
"Foreground Attention Flow Network",
"Regression Network",
"Flow Net 2 Model",
"Foreground Attention Approach",
"Foreground Background Segmentation",
"Flow Prediction Error",
"Proposed PA Flow Net",
"Spacecraft Relative Pose Estimation",
"Space Travelling",
"Spacecraft Attitude Estimation",
"Computer Vision Technique",
"Optical Flow Estimation",
"Deep Neural Network",
"Temperature 64 0 K",
"Space Vehicles",
"Location Awareness",
"Computer Vision",
"Pose Estimation",
"Moon",
"Cameras",
"Satellite Navigation Systems",
"Optical Flow",
"Flow Net",
"Relative Camera Pose Estimation",
"Spacecraft Landing",
"Space Localization"
],
"authors": [
{
"affiliation": "National Chiao Tung University,Hsinchu City,Taiwan (R.O.C.)",
"fullName": "Chen Zhi-Yu",
"givenName": "Chen",
"surname": "Zhi-Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chiao Tung University,Hsinchu City,Taiwan (R.O.C.)",
"fullName": "Chen Po-Heng",
"givenName": "Chen",
"surname": "Po-Heng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chiao Tung University,Hsinchu City,Taiwan (R.O.C.)",
"fullName": "Chen Kuan-Wen",
"givenName": "Chen",
"surname": "Kuan-Wen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Space Organization,Hsinchu City,Taiwan (R.O.C.)",
"fullName": "Chan Chen-Yu",
"givenName": "Chan",
"surname": "Chen-Yu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "9703-9710",
"year": "2021",
"issn": "1051-4651",
"isbn": "978-1-7281-8808-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09412426",
"articleId": "1tmjlyNN0go",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09412277",
"articleId": "1tmivWZXFVm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2021/3176/0/09666992",
"title": "Relative Pose Consistency for Semi-Supervised Head Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09666992/1A6BGyUQ4yk",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09667045",
"title": "PoseDet: Fast Multi-Person Pose Estimation Using Pose Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667045/1A6BmXvShR6",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1059",
"title": "End-to-End Multi-Person Pose Estimation with Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1059/1H1mTOhuZqw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09954214",
"title": "AlphaPose: Whole-Body Regional Multi-Person Pose Estimation and Tracking in Real-Time",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09954214/1InorY9vGXS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c671",
"title": "Polarimetric Relative Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c671/1hVlHKCFUTS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150733",
"title": "ViPR: Visual-Odometry-aided Pose Regression for 6DoF Camera Localization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150733/1lPH7elchhK",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413234",
"title": "Can You Trust Your Pose? Confidence Estimation in Visual Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413234/1tmiIBwpcWI",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d246",
"title": "Back to the Feature: Learning Robust Camera Localization from Pixels to Pose",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d246/1yeJfToH5e0",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d002",
"title": "FFB6D: A Full Flow Bidirectional Fusion Network for 6D Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d002/1yeJomBIuVa",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h613",
"title": "img2pose: Face Alignment and Detection via 6DoF, Face Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h613/1yeK4plxwKQ",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1B12DGrwoyQ",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1B12YsEjrG0",
"doi": "10.1109/WACV51458.2022.00036",
"title": "HybVIO: Pushing the Limits of Real-time Visual-inertial Odometry",
"normalizedTitle": "HybVIO: Pushing the Limits of Real-time Visual-inertial Odometry",
"abstract": "We present HybVIO, a novel hybrid approach for combining filtering-based visual-inertial odometry (VIO) with optimization-based SLAM. The core of our method is highly robust, independent VIO with improved IMU bias modeling, outlier rejection, stationarity detection, and feature track selection, which is adjustable to run on embedded hardware. Long-term consistency is achieved with a loosely-coupled SLAM module. In academic benchmarks, our solution yields excellent performance in all categories, especially in the real-time use case, where we outperform the current state-of-the-art. We also demonstrate the feasibility of VIO for vehicular tracking on consumer-grade hardware using a custom dataset, and show good performance in comparison to current commercial VISLAM alternatives.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present HybVIO, a novel hybrid approach for combining filtering-based visual-inertial odometry (VIO) with optimization-based SLAM. The core of our method is highly robust, independent VIO with improved IMU bias modeling, outlier rejection, stationarity detection, and feature track selection, which is adjustable to run on embedded hardware. Long-term consistency is achieved with a loosely-coupled SLAM module. In academic benchmarks, our solution yields excellent performance in all categories, especially in the real-time use case, where we outperform the current state-of-the-art. We also demonstrate the feasibility of VIO for vehicular tracking on consumer-grade hardware using a custom dataset, and show good performance in comparison to current commercial VISLAM alternatives.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present HybVIO, a novel hybrid approach for combining filtering-based visual-inertial odometry (VIO) with optimization-based SLAM. The core of our method is highly robust, independent VIO with improved IMU bias modeling, outlier rejection, stationarity detection, and feature track selection, which is adjustable to run on embedded hardware. Long-term consistency is achieved with a loosely-coupled SLAM module. In academic benchmarks, our solution yields excellent performance in all categories, especially in the real-time use case, where we outperform the current state-of-the-art. We also demonstrate the feasibility of VIO for vehicular tracking on consumer-grade hardware using a custom dataset, and show good performance in comparison to current commercial VISLAM alternatives.",
"fno": "091500a287",
"keywords": [
"Distance Measurement",
"Filtering Theory",
"Object Tracking",
"Optimisation",
"Robot Vision",
"SLAM Robots",
"Hyb VIO",
"Real Time Visual Inertial Odometry",
"Combining Filtering Based Visual Inertial Odometry",
"Optimization Based SLAM",
"Independent VIO",
"Improved IMU Bias Modeling",
"Outlier Rejection",
"Stationarity Detection",
"Feature Track Selection",
"Embedded Hardware",
"Long Term Consistency",
"SLAM Module",
"Real Time Use Case",
"Vehicular Tracking",
"Consumer Grade Hardware",
"Computer Vision",
"Simultaneous Localization And Mapping",
"Benchmark Testing",
"Feature Extraction",
"Real Time Systems",
"Hardware",
"3 D Computer Vision Stereo Processing",
"Vision For Aerial Drone Underwater Ground Vehicles"
],
"authors": [
{
"affiliation": "Spectacular AI,Helsinki,Finland",
"fullName": "Otto Seiskari",
"givenName": "Otto",
"surname": "Seiskari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Spectacular AI,Helsinki,Finland",
"fullName": "Pekka Rantalankila",
"givenName": "Pekka",
"surname": "Rantalankila",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Spectacular AI,Helsinki,Finland",
"fullName": "Juho Kannala",
"givenName": "Juho",
"surname": "Kannala",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Spectacular AI,Helsinki,Finland",
"fullName": "Jerry Ylilammi",
"givenName": "Jerry",
"surname": "Ylilammi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Spectacular AI,Helsinki,Finland",
"fullName": "Esa Rahtu",
"givenName": "Esa",
"surname": "Rahtu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Spectacular AI,Helsinki,Finland",
"fullName": "Arno Solin",
"givenName": "Arno",
"surname": "Solin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-01-01T00:00:00",
"pubType": "proceedings",
"pages": "287-296",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-0915-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1B12YpoMfkc",
"name": "pwacv202209150-09707078s1-mm_091500a287.zip",
"size": "3.88 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pwacv202209150-09707078s1-mm_091500a287.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "091500a277",
"articleId": "1B13xn8f1Bu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "091500a297",
"articleId": "1B13oFhspW0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0/07024256",
"title": "A Fast Visual Odometry and Mapping System for RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/sbrlarsrobocontrol/2014/07024256/12OmNylboJA",
"parentPublication": {
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0",
"title": "2014 Joint Conference on Robotics: SBR-LARS Robotics Symposium and Robocontrol (SBR LARS Robocontrol)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a037",
"title": "Visual-Inertial SLAM Initialization: A General Linear Formulation and a Gravity-Observing Non-Linear Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a037/17D45XuDNEt",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956500",
"title": "Improving Visual Inertial Odometry with UWB Positioning for UAV Indoor Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956500/1IHoVrRSUDK",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nana/2022/6131/0/613100a253",
"title": "Feature Matching for Indoor-Oriented Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/nana/2022/613100a253/1JwPM0I7fIk",
"parentPublication": {
"id": "proceedings/nana/2022/6131/0",
"title": "2022 International Conference on Networking and Network Applications (NaNA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08691513",
"title": "Unsupervised Deep Visual-Inertial Odometry with Online Error Correction for RGB-D Imagery",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08691513/1jeCTblwCMo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2020/9891/0/09108681",
"title": "Towards End-to-end Learning of Visual Inertial Odometry with an EKF",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2020/09108681/1kpIF0G0Ryg",
"parentPublication": {
"id": "proceedings/crv/2020/9891/0",
"title": "2020 17th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523838",
"title": "Instant Visual Odometry Initialization for Mobile AR",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523838/1wpqsbFen3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100c559",
"title": "DC-VINS: Dynamic Camera Visual Inertial Navigation System with Online Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100c559/1yNi2d28Ej6",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a275",
"title": "RNIN-VIO: Robust Neural Inertial Navigation Aided Visual-Inertial Odometry in Challenging Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a275/1yeCVsvyctG",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a193",
"title": "DVIO: Depth-Aided Visual Inertial Odometry for RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a193/1yeD3rmluF2",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1IHotVZum6Q",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "9956007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IHoVrRSUDK",
"doi": "10.1109/ICPR56361.2022.9956500",
"title": "Improving Visual Inertial Odometry with UWB Positioning for UAV Indoor Navigation",
"normalizedTitle": "Improving Visual Inertial Odometry with UWB Positioning for UAV Indoor Navigation",
"abstract": "This paper presents a method to improve the localization accuracy for visual inertial odometry (VIO) by combining the ultra-wideband (UWB) positioning technology. The overall architecture is mainly divided into two stages. In the first stage, the constraint on UWB short-term position change is adopted to improve the pose estimation results of the VIO system. It is also used to solve the translation error caused by the lack of visual features and vibration during the flight. In the second stage, a loose coupling method based on nonlinear optimization is utilized to fuse the local pose estimator of the VIO system with the global constraints from the UWB positioning. At the beginning of each optimization, the alignment between the VIO and UWB frames is estimated to avoid the influence of the coordinate transformation caused by the cumulative error of the VIO system. Since there are no public datasets available for comparison, we have established several datasets containing large vibration amplitudes and weak feature points. In the experiment, the performance evaluation of the proposed technique is carried out using the Apriltag approach for verification.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a method to improve the localization accuracy for visual inertial odometry (VIO) by combining the ultra-wideband (UWB) positioning technology. The overall architecture is mainly divided into two stages. In the first stage, the constraint on UWB short-term position change is adopted to improve the pose estimation results of the VIO system. It is also used to solve the translation error caused by the lack of visual features and vibration during the flight. In the second stage, a loose coupling method based on nonlinear optimization is utilized to fuse the local pose estimator of the VIO system with the global constraints from the UWB positioning. At the beginning of each optimization, the alignment between the VIO and UWB frames is estimated to avoid the influence of the coordinate transformation caused by the cumulative error of the VIO system. Since there are no public datasets available for comparison, we have established several datasets containing large vibration amplitudes and weak feature points. In the experiment, the performance evaluation of the proposed technique is carried out using the Apriltag approach for verification.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a method to improve the localization accuracy for visual inertial odometry (VIO) by combining the ultra-wideband (UWB) positioning technology. The overall architecture is mainly divided into two stages. In the first stage, the constraint on UWB short-term position change is adopted to improve the pose estimation results of the VIO system. It is also used to solve the translation error caused by the lack of visual features and vibration during the flight. In the second stage, a loose coupling method based on nonlinear optimization is utilized to fuse the local pose estimator of the VIO system with the global constraints from the UWB positioning. At the beginning of each optimization, the alignment between the VIO and UWB frames is estimated to avoid the influence of the coordinate transformation caused by the cumulative error of the VIO system. Since there are no public datasets available for comparison, we have established several datasets containing large vibration amplitudes and weak feature points. In the experiment, the performance evaluation of the proposed technique is carried out using the Apriltag approach for verification.",
"fno": "09956500",
"keywords": [
"Autonomous Aerial Vehicles",
"Distance Measurement",
"Feature Extraction",
"Kalman Filters",
"Mobile Robots",
"Pose Estimation",
"Robot Vision",
"SLAM Robots",
"Ultra Wideband Communication",
"Global Constraints",
"Local Pose Estimator",
"Localization Accuracy",
"Loose Coupling Method",
"Nonlinear Optimization",
"Pose Estimation Results",
"Translation Error",
"UAV Indoor Navigation",
"Ultra Wideband Positioning Technology",
"UWB Frames",
"UWB Positioning",
"UWB Short Term Position Change",
"Vibration",
"VIO System",
"Visual Features",
"Visual Inertial Odometry",
"Vibrations",
"Performance Evaluation",
"Location Awareness",
"Visualization",
"Simultaneous Localization And Mapping",
"Pose Estimation",
"Robustness"
],
"authors": [
{
"affiliation": "National Chung Cheng University,Department of Electrical Engineering,Taiwan",
"fullName": "Jia-Rong Zhan",
"givenName": "Jia-Rong",
"surname": "Zhan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taipei University of Technology,Department of Computer Science and Information Engineering,Taiwan",
"fullName": "Huei-Yung Lin",
"givenName": "Huei-Yung",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "4189-4195",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9062-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09956538",
"articleId": "1IHqsbk2MPm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956038",
"articleId": "1IHpquLNvR6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvisp/2021/0770/0/077000a105",
"title": "Self-Positioning for Mobile Robot Indoor Navigation Based on Wheel Odometry, Inertia Measurement Unit and Ultra Wideband",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a105/1APqabTYikM",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a287",
"title": "HybVIO: Pushing the Limits of Real-time Visual-inertial Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a287/1B12YsEjrG0",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08691513",
"title": "Unsupervised Deep Visual-Inertial Odometry with Online Error Correction for RGB-D Imagery",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08691513/1jeCTblwCMo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2020/6698/0/669800a934",
"title": "Research on UWB / IMU Fusion Positioning Technology in Mine",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2020/669800a934/1kuHKQnZH9u",
"parentPublication": {
"id": "proceedings/icitbs/2020/6698/0",
"title": "2020 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/02/09158554",
"title": "Deep Visual Odometry With Adaptive Memory",
"doi": null,
"abstractUrl": "/journal/tp/2022/02/09158554/1m1eI3Gko48",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2020/9866/0/986600a247",
"title": "Towards Secure and Scalable UWB-based Positioning Systems",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2020/986600a247/1rsj11uha3C",
"parentPublication": {
"id": "proceedings/mass/2020/9866/0",
"title": "2020 IEEE 17th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400b505",
"title": "Research on UWB positioning method based on deep learning",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400b505/1tzyHVPB7Gg",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2021/1596/0/159600a552",
"title": "Research on UWB positioning trajectory prediction based on LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2021/159600a552/1wcdbHVgITe",
"parentPublication": {
"id": "proceedings/aemcse/2021/1596/0",
"title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600b168",
"title": "Research on Indoor UWB Positioning Algorithm in NLOS Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600b168/1x3kobIP1Kg",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600c405",
"title": "Integrated Navigation and Positioning based on UWB",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600c405/1x3l62OFYHe",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1KmF7rVz6Y8",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KmFcm0MBhe",
"doi": "10.1109/AIVR56993.2022.00015",
"title": "Cumulative Evidence for Scene Change Detection and Local Map Updates",
"normalizedTitle": "Cumulative Evidence for Scene Change Detection and Local Map Updates",
"abstract": "Accurate localization and mapping is a key to applications such as Metaverse, Augmented Reality, and Autonomous Driving using positioning technology to determine their global position in consistent world coordinates. As the scene changes with time, map descriptors become outdated, affecting Visual Positioning System localization accuracy. Previous studies have primarily relied on direct comparison of point clouds for change detection, which is a slow process due to the need to build a new point cloud every time. Image-based comparison requires keeping the map images - a privacy issue and is sensitive to viewpoint differences. In this work, we propose a novel approach based on point-clouds descriptors comparison, which can detect structural and texture scene changes followed by the process of local map update. This approach is more robust under appearance changes, even in illumination differences, and more efficient for local map updates as it provides better localization accuracy and faster run times. The cumulative evidence approach eliminates the need for a dedicated mapping process. In addition, our work provides state-of-the-art performances for image-to-image change detection compared to previous research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Accurate localization and mapping is a key to applications such as Metaverse, Augmented Reality, and Autonomous Driving using positioning technology to determine their global position in consistent world coordinates. As the scene changes with time, map descriptors become outdated, affecting Visual Positioning System localization accuracy. Previous studies have primarily relied on direct comparison of point clouds for change detection, which is a slow process due to the need to build a new point cloud every time. Image-based comparison requires keeping the map images - a privacy issue and is sensitive to viewpoint differences. In this work, we propose a novel approach based on point-clouds descriptors comparison, which can detect structural and texture scene changes followed by the process of local map update. This approach is more robust under appearance changes, even in illumination differences, and more efficient for local map updates as it provides better localization accuracy and faster run times. The cumulative evidence approach eliminates the need for a dedicated mapping process. In addition, our work provides state-of-the-art performances for image-to-image change detection compared to previous research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Accurate localization and mapping is a key to applications such as Metaverse, Augmented Reality, and Autonomous Driving using positioning technology to determine their global position in consistent world coordinates. As the scene changes with time, map descriptors become outdated, affecting Visual Positioning System localization accuracy. Previous studies have primarily relied on direct comparison of point clouds for change detection, which is a slow process due to the need to build a new point cloud every time. Image-based comparison requires keeping the map images - a privacy issue and is sensitive to viewpoint differences. In this work, we propose a novel approach based on point-clouds descriptors comparison, which can detect structural and texture scene changes followed by the process of local map update. This approach is more robust under appearance changes, even in illumination differences, and more efficient for local map updates as it provides better localization accuracy and faster run times. The cumulative evidence approach eliminates the need for a dedicated mapping process. In addition, our work provides state-of-the-art performances for image-to-image change detection compared to previous research.",
"fno": "572500a055",
"keywords": [
"Augmented Reality",
"Feature Extraction",
"Image Matching",
"Image Sensors",
"Image Texture",
"Object Detection",
"Pose Estimation",
"SLAM Robots",
"Solid Modelling",
"Traffic Engineering Computing",
"Appearance Changes",
"Cumulative Evidence Approach",
"Dedicated Mapping Process",
"Global Position",
"Image Based Comparison",
"Image To Image Change Detection",
"Local Map Update",
"Map Descriptors",
"Map Images",
"Point Cloud",
"Point Clouds Descriptors Comparison",
"Positioning Technology",
"Scene Change Detection",
"Texture Scene Changes",
"Visual Positioning System Localization Accuracy",
"Location Awareness",
"Point Cloud Compression",
"Visualization",
"Image Segmentation",
"Privacy",
"Three Dimensional Displays",
"Pipelines",
"AR VR",
"Deep Learning",
"Map Update",
"Scene Change Detection",
"Visual Positioning System",
"3 D Point Cloud"
],
"authors": [
{
"affiliation": "Huawei Research,Israel",
"fullName": "Itzik Wilf",
"givenName": "Itzik",
"surname": "Wilf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Research,Israel",
"fullName": "Nati Daniel",
"givenName": "Nati",
"surname": "Daniel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Research,China",
"fullName": "Lin Manqing",
"givenName": "Lin",
"surname": "Manqing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technion IIT,Israel",
"fullName": "Firas Shama",
"givenName": "Firas",
"surname": "Shama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Research,Israel",
"fullName": "Omri Asraf",
"givenName": "Omri",
"surname": "Asraf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Research,China",
"fullName": "Feng Wensen",
"givenName": "Feng",
"surname": "Wensen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Research,Israel",
"fullName": "Ofer Kruzel",
"givenName": "Ofer",
"surname": "Kruzel",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-12-01T00:00:00",
"pubType": "proceedings",
"pages": "55-63",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5725-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "572500a046",
"articleId": "1KmFa4g8jXG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "572500a064",
"articleId": "1KmFbVCEHxm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dcabes/2021/2889/0/288900a061",
"title": "NDT-based robot positioning system for large scale diversity environment",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2021/288900a061/1AqwxCDExUY",
"parentPublication": {
"id": "proceedings/dcabes/2021/2889/0",
"title": "2021 20th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859689",
"title": "Opendenselane: A Dense Lidar-Based Dataset for HD Map Construction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859689/1G9DVnSiRXO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c396",
"title": "Long-term Visual Map Sparsification with Heterogeneous GNN",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c396/1H0NaCHPv4Q",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcrait/2022/8192/0/819200a072",
"title": "Vision Localization and Compensation Technology for Clamping Error in Faucet Robotic Grinding",
"doi": null,
"abstractUrl": "/proceedings-article/gcrait/2022/819200a072/1HcnkHmvjK8",
"parentPublication": {
"id": "proceedings/gcrait/2022/8192/0",
"title": "2022 Global Conference on Robotics, Artificial Intelligence and Information Technology (GCRAIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600c612",
"title": "Anomaly Detection in 3D Point Clouds using Deep Geometric Descriptors",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600c612/1L6LAu5ndXG",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300h375",
"title": "Mapping, Localization and Path Planning for Image-Based Navigation Using Visual Features and Map",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300h375/1gyrs6jzQKQ",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300i642",
"title": "DeepMapping: Unsupervised Map Estimation From Multiple Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300i642/1gyrzi3crw4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a838",
"title": "Deep LiDAR localization using optical flow sensor-map correspondences",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a838/1qyxjQGkHPW",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a972",
"title": "Deep Image Comparator: Learning to Visualize Editorial Change",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a972/1yVA568GRuU",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a080",
"title": "BDLoc: Global Localization from 2.5D Building Map",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a080/1yeCVFqROFO",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1grOiRpGmv6",
"title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1grOl4HzJZK",
"doi": "10.1109/AIVR46125.2019.00043",
"title": "A Case Study on Visual-Inertial Odometry using Supervised, Semi-Supervised and Unsupervised Learning Methods",
"normalizedTitle": "A Case Study on Visual-Inertial Odometry using Supervised, Semi-Supervised and Unsupervised Learning Methods",
"abstract": "This paper presents a pilot study comparing three different learning-based visual-inertial odometry (VIO) approaches: supervised, semi-supervised, and unsupervised. Localization and navigation have been the ancient bur important topic in both research area and industry. Many well-developed algorithms have been established regarding this research task using a single sensor or multiple sensors. VIO, that uses images and inertial measurements to estimate the motion, is considered as one of the key technologies to virtual reality and argument reality. With the rapid development of artificial intelligence technology, people have started to explore new methods for VIO instead of traditional feature-based methods. The advantages of using learning-based method can be found in eliminating the calibration and enhance the robustness and accuracy. However, most of the popular learning-based VIO systems require ground truth during training. The lack of training dataset limits the power of neural networks. In this study, we proposed both semi-supervised and unsupervised methods and compared the performances between the supervised model and them. The neural networks were trained and tested on two well-known datasets: KITTI Dataset and EuRoC MAV Dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a pilot study comparing three different learning-based visual-inertial odometry (VIO) approaches: supervised, semi-supervised, and unsupervised. Localization and navigation have been the ancient bur important topic in both research area and industry. Many well-developed algorithms have been established regarding this research task using a single sensor or multiple sensors. VIO, that uses images and inertial measurements to estimate the motion, is considered as one of the key technologies to virtual reality and argument reality. With the rapid development of artificial intelligence technology, people have started to explore new methods for VIO instead of traditional feature-based methods. The advantages of using learning-based method can be found in eliminating the calibration and enhance the robustness and accuracy. However, most of the popular learning-based VIO systems require ground truth during training. The lack of training dataset limits the power of neural networks. In this study, we proposed both semi-supervised and unsupervised methods and compared the performances between the supervised model and them. The neural networks were trained and tested on two well-known datasets: KITTI Dataset and EuRoC MAV Dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a pilot study comparing three different learning-based visual-inertial odometry (VIO) approaches: supervised, semi-supervised, and unsupervised. Localization and navigation have been the ancient bur important topic in both research area and industry. Many well-developed algorithms have been established regarding this research task using a single sensor or multiple sensors. VIO, that uses images and inertial measurements to estimate the motion, is considered as one of the key technologies to virtual reality and argument reality. With the rapid development of artificial intelligence technology, people have started to explore new methods for VIO instead of traditional feature-based methods. The advantages of using learning-based method can be found in eliminating the calibration and enhance the robustness and accuracy. However, most of the popular learning-based VIO systems require ground truth during training. The lack of training dataset limits the power of neural networks. In this study, we proposed both semi-supervised and unsupervised methods and compared the performances between the supervised model and them. The neural networks were trained and tested on two well-known datasets: KITTI Dataset and EuRoC MAV Dataset.",
"fno": "560400a203",
"keywords": [
"Visual Inertial Odometry",
"CNN LSTM",
"Supervised",
"Semi Supervised",
"Unsupervised"
],
"authors": [
{
"affiliation": "Embry-Riddle Aeronautical University",
"fullName": "Yuan Tian",
"givenName": "Yuan",
"surname": "Tian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Embry-Riddle Aeronautical University",
"fullName": "Marc Compere",
"givenName": "Marc",
"surname": "Compere",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-12-01T00:00:00",
"pubType": "proceedings",
"pages": "203-2034",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5604-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "560400a199",
"articleId": "1grOl7YOx7W",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "560400a208",
"articleId": "1grOmd08qgo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2022/0915/0/091500a287",
"title": "HybVIO: Pushing the Limits of Real-time Visual-inertial Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a287/1B12YsEjrG0",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g604",
"title": "RIO: Rotation-equivariance supervised learning of robust inertial odometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g604/1H1kzzVZWcE",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956500",
"title": "Improving Visual Inertial Odometry with UWB Positioning for UAV Indoor Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956500/1IHoVrRSUDK",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0534",
"title": "Selective Sensor Fusion for Neural Visual-Inertial Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0534/1gyrdMr9Ml2",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a366",
"title": "NEAR: The NetEase AR Oriented Visual Inertial Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a366/1gysjSArEsM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08691513",
"title": "Unsupervised Deep Visual-Inertial Odometry with Online Error Correction for RGB-D Imagery",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08691513/1jeCTblwCMo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2020/9891/0/09108681",
"title": "Towards End-to-end Learning of Visual Inertial Odometry with an EKF",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2020/09108681/1kpIF0G0Ryg",
"parentPublication": {
"id": "proceedings/crv/2020/9891/0",
"title": "2020 17th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpbd&is/2020/6512/0/09130571",
"title": "PLS-VIO: Stereo Vision-inertial Odometry Based on Point and Line Features",
"doi": null,
"abstractUrl": "/proceedings-article/hpbd&is/2020/09130571/1l6SQmZTPMY",
"parentPublication": {
"id": "proceedings/hpbd&is/2020/6512/0",
"title": "2020 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a275",
"title": "RNIN-VIO: Robust Neural Inertial Navigation Aided Visual-Inertial Odometry in Challenging Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a275/1yeCVsvyctG",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a193",
"title": "DVIO: Depth-Aided Visual Inertial Odometry for RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a193/1yeD3rmluF2",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1iTvzrAuJxu",
"title": "2019 IEEE 16th International Conference on Mobile Ad Hoc and Sensor Systems Workshops (MASSW)",
"acronym": "massw",
"groupId": "1836244",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1iTvAvltkM8",
"doi": "10.1109/MASSW.2019.00034",
"title": "HYLOC - A Hybrid Localization and Communication System for Vehicles in Mixed Terrain Environments",
"normalizedTitle": "HYLOC - A Hybrid Localization and Communication System for Vehicles in Mixed Terrain Environments",
"abstract": "Accurate and timely location data of vehicles in Smart Cities is an essential requirement for efficient Intelligent Transportation Systems. However, in mixed terrain environments, like the Holy City of Makkah, single localization mechanism becomes insufficient due to rocky hills, long tunnel, deep valleys, and high-rise buildings. Due to the same reasons, Internet connectivity becomes disrupted on the move. As a result, accurate localization and subsequent update of location data on a remote server become cumbersome. In this paper, we present a hybrid localization and communication system (HYLOC) for vehicles in mixed terrain environments. HYLOC uses multiple localization techniques and communication mechanisms to localize and send location data to remote server in minimum time. Reducing location data report time at remote servers hosting geo-data processing application increases their efficiency and response time. This paper shows how HYLOC improves localization and reduces reporting time using experiment and analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Accurate and timely location data of vehicles in Smart Cities is an essential requirement for efficient Intelligent Transportation Systems. However, in mixed terrain environments, like the Holy City of Makkah, single localization mechanism becomes insufficient due to rocky hills, long tunnel, deep valleys, and high-rise buildings. Due to the same reasons, Internet connectivity becomes disrupted on the move. As a result, accurate localization and subsequent update of location data on a remote server become cumbersome. In this paper, we present a hybrid localization and communication system (HYLOC) for vehicles in mixed terrain environments. HYLOC uses multiple localization techniques and communication mechanisms to localize and send location data to remote server in minimum time. Reducing location data report time at remote servers hosting geo-data processing application increases their efficiency and response time. This paper shows how HYLOC improves localization and reduces reporting time using experiment and analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Accurate and timely location data of vehicles in Smart Cities is an essential requirement for efficient Intelligent Transportation Systems. However, in mixed terrain environments, like the Holy City of Makkah, single localization mechanism becomes insufficient due to rocky hills, long tunnel, deep valleys, and high-rise buildings. Due to the same reasons, Internet connectivity becomes disrupted on the move. As a result, accurate localization and subsequent update of location data on a remote server become cumbersome. In this paper, we present a hybrid localization and communication system (HYLOC) for vehicles in mixed terrain environments. HYLOC uses multiple localization techniques and communication mechanisms to localize and send location data to remote server in minimum time. Reducing location data report time at remote servers hosting geo-data processing application increases their efficiency and response time. This paper shows how HYLOC improves localization and reduces reporting time using experiment and analysis.",
"fno": "412100a133",
"keywords": [
"Geographic Information Systems",
"Intelligent Transportation Systems",
"Smart Cities",
"Holy City Of Makkah",
"Hybrid Localization And Communication System",
"Smart Cities",
"Response Time",
"Geo Data",
"Location Data Report Time",
"Communication Mechanisms",
"Multiple Localization Techniques",
"HYLOC",
"Remote Server",
"High Rise Buildings",
"Single Localization Mechanism",
"Mixed Terrain Environments",
"Efficient Intelligent Transportation Systems",
"Global Positioning System",
"Servers",
"Robot Sensing Systems",
"Communication Systems",
"Performance Evaluation",
"Cameras",
"Buildings",
"Vehicle Localization",
"Assisted GPS",
"BLE Based Localization",
"Hybrid Localization",
"Hybrid Communication"
],
"authors": [
{
"affiliation": "Umm Al-Qura University",
"fullName": "Emad Felemban",
"givenName": "Emad",
"surname": "Felemban",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Umm Al-Qura University",
"fullName": "Adil Amjad",
"givenName": "Adil",
"surname": "Amjad",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "massw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-11-01T00:00:00",
"pubType": "proceedings",
"pages": "133-139",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4121-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "412100a127",
"articleId": "1iTvBjUUtyM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "412100a140",
"articleId": "1iTvB6C29zi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/soca/2014/6833/0/6833a154",
"title": "Self-Contained Localization without Auxiliary Signals on Smart Devices",
"doi": null,
"abstractUrl": "/proceedings-article/soca/2014/6833a154/12OmNApcugY",
"parentPublication": {
"id": "proceedings/soca/2014/6833/0",
"title": "2014 IEEE 7th International Conference on Service-Oriented Computing and Applications (SOCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2910/0/00201586",
"title": "Ground and airborne localization over rough terrain using random environmental range-measurements",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201586/12OmNwGqBpo",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223174",
"title": "Computational ground and airborne localization over rough terrain",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223174/12OmNxbEtNH",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012228",
"title": "A 'retraction' method for terrain model acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012228/12OmNzBOhBW",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926552",
"title": "Augmented Terrain-Based Navigation to Enable Persistent Autonomy for Underwater Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926552/12OmNzJbR37",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2013/04/ttd2013040744",
"title": "LOBOT: Low-Cost, Self-Contained Localization of Small-Sized Ground Robotic Vehicles",
"doi": null,
"abstractUrl": "/journal/td/2013/04/ttd2013040744/13rRUxYrbLQ",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsgea/2018/6953/0/695301a303",
"title": "Design and Implementation of Debris Search and Rescue Robot System Based on Internet of Things",
"doi": null,
"abstractUrl": "/proceedings-article/icsgea/2018/695301a303/17D45VtKivs",
"parentPublication": {
"id": "proceedings/icsgea/2018/6953/0",
"title": "2018 International Conference on Smart Grid and Electrical Automation (ICSGEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2019/12/08546770",
"title": "Robust Sensor Localization against Known Sensor Position Attacks",
"doi": null,
"abstractUrl": "/journal/tm/2019/12/08546770/17D45Xcttl6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2023/01/09817385",
"title": "Learning From FM Communications: Toward Accurate, Efficient, All-Terrain Vehicle Localization",
"doi": null,
"abstractUrl": "/journal/nt/2023/01/09817385/1EOzVycd4pa",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2022/05/09214891",
"title": "Localization of Networks on 3D Terrain Surfaces",
"doi": null,
"abstractUrl": "/journal/tm/2022/05/09214891/1nHNFAHLOQE",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1wG69VkmaSk",
"title": "2021 4th International Conference on Intelligent Autonomous Systems (ICoIAS)",
"acronym": "icoias",
"groupId": "1825244",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1wG6dCJ5giY",
"doi": "10.1109/ICoIAS53694.2021.00039",
"title": "GNSS-Aided Visual-Inertial Odomtry with Failure Mode Recognition",
"normalizedTitle": "GNSS-Aided Visual-Inertial Odomtry with Failure Mode Recognition",
"abstract": "In recent years, multi-sensor fusion algorithms have been widely used in simultaneous localization and mapping (SLAM) issues. Global navigation satellite systems (GNSS), camera and inertial measurement unit (IMU) is often used in localization framework. Common sensor configuration includes vision-only, vision-inertial, inertial-GNSS and so on. Due to almost every sensor can be vulnerable in a given environment, we still encounter challenges in improving robustness of the simultaneous localization and mapping system. In order to solve this problem to some extent, we propose a new approach to enhance robustness of the system. We design an adaptive localization framework to switch location strategy based on the failure mode of global navigation satellite system. Experiment shows the absolute trajectory error (APE) of proposed method is better comparing with the current mainstream algorithms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, multi-sensor fusion algorithms have been widely used in simultaneous localization and mapping (SLAM) issues. Global navigation satellite systems (GNSS), camera and inertial measurement unit (IMU) is often used in localization framework. Common sensor configuration includes vision-only, vision-inertial, inertial-GNSS and so on. Due to almost every sensor can be vulnerable in a given environment, we still encounter challenges in improving robustness of the simultaneous localization and mapping system. In order to solve this problem to some extent, we propose a new approach to enhance robustness of the system. We design an adaptive localization framework to switch location strategy based on the failure mode of global navigation satellite system. Experiment shows the absolute trajectory error (APE) of proposed method is better comparing with the current mainstream algorithms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, multi-sensor fusion algorithms have been widely used in simultaneous localization and mapping (SLAM) issues. Global navigation satellite systems (GNSS), camera and inertial measurement unit (IMU) is often used in localization framework. Common sensor configuration includes vision-only, vision-inertial, inertial-GNSS and so on. Due to almost every sensor can be vulnerable in a given environment, we still encounter challenges in improving robustness of the simultaneous localization and mapping system. In order to solve this problem to some extent, we propose a new approach to enhance robustness of the system. We design an adaptive localization framework to switch location strategy based on the failure mode of global navigation satellite system. Experiment shows the absolute trajectory error (APE) of proposed method is better comparing with the current mainstream algorithms.",
"fno": "419500a175",
"keywords": [
"Image Fusion",
"Inertial Navigation",
"Mobile Robots",
"Robot Vision",
"Satellite Navigation",
"SLAM Robots",
"Mapping System",
"Adaptive Localization Framework",
"Global Navigation Satellite System",
"Failure Mode Recognition",
"Multisensor Fusion Algorithms",
"Camera",
"Inertial Measurement Unit",
"Common Sensor Configuration",
"GNSS Aided Visual Inertial Odometry",
"Location Awareness",
"Global Navigation Satellite System",
"Simultaneous Localization And Mapping",
"Measurement Units",
"Autonomous Systems",
"Switches",
"Filtering Algorithms",
"Graph Optimization",
"Kalman Filter",
"Sensor Fusion",
"Simultaneous Localization And Mapping"
],
"authors": [
{
"affiliation": "Chongqing University,College of Mechanical Engineering,Chongqing,China",
"fullName": "Qiguang Su",
"givenName": "Qiguang",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chongqing University,College of Mechanical Engineering,Chongqing,China",
"fullName": "Qian Tang",
"givenName": "Qian",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chongqing University,College of Mechanical Engineering,Chongqing,China",
"fullName": "Yi Li",
"givenName": "Yi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chongqing University,College of Mechanical Engineering,Chongqing,China",
"fullName": "Lianchao Liu",
"givenName": "Lianchao",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icoias",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-05-01T00:00:00",
"pubType": "proceedings",
"pages": "175-180",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4195-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "419500a169",
"articleId": "1wG6epjTGTK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "419500a181",
"articleId": "1wG6dRdLi7e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2016/2312/0/2312a347",
"title": "Influence of Inertial Sensor Errors on GNSS/INS Integrated Navigation Performance",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a347/12OmNvzJFUY",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a739",
"title": "The Design of Global Navigation Satellite System/Inertial Navigation System Ultra-Tight Integration for High Dynamic Applications",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a739/12OmNxiKs2u",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2018/8034/0/803400a083",
"title": "A SINS-Assisted Fast Acquisition Method for GNSS Signals Based on Compressed Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2018/803400a083/17D45VsBTUq",
"parentPublication": {
"id": "proceedings/cbd/2018/8034/0",
"title": "2018 Sixth International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2022/8810/0/881000b193",
"title": "Localization Method for SLAM using an Autonomous Cart as a Guard Robot",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2022/881000b193/1FJ5MF7BxbG",
"parentPublication": {
"id": "proceedings/compsac/2022/8810/0",
"title": "2022 IEEE 46th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g594",
"title": "Neural Inertial Localization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g594/1H0LyLa9bGM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020593",
"title": "Analysis of web-based geo-visualization methods applied for Automated Guided Vehicle using Satellite Navigation Systems",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020593/1KfSMTMPTYk",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itca/2019/6494/0/09092517",
"title": "Robust self-localization system based on multi-sensor information fusion in city environments",
"doi": null,
"abstractUrl": "/proceedings-article/itca/2019/09092517/1jPaULk9Md2",
"parentPublication": {
"id": "proceedings/itca/2019/6494/0",
"title": "2019 International Conference on Information Technology and Computer Application (ITCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2020/4380/0/438000c001",
"title": "A Low-Cost Method for Accurate Localization of Traffic Participants",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2020/438000c001/1r54dcOUVNu",
"parentPublication": {
"id": "proceedings/trustcom/2020/4380/0",
"title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600a990",
"title": "Performance Analysis of GNSS System Time Offset during GNSS Modernization",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600a990/1x3kNiZVon6",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2021/0898/0/089800a161",
"title": "Research on GNSS/DR method based on B-spline and optimized BP neural network",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2021/089800a161/1zw6baySyHu",
"parentPublication": {
"id": "proceedings/ictai/2021/0898/0",
"title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeCVsvyctG",
"doi": "10.1109/ISMAR52148.2021.00043",
"title": "RNIN-VIO: Robust Neural Inertial Navigation Aided Visual-Inertial Odometry in Challenging Scenes",
"normalizedTitle": "RNIN-VIO: Robust Neural Inertial Navigation Aided Visual-Inertial Odometry in Challenging Scenes",
"abstract": "In this work, we propose a tightly-coupled EKF framework for visual-inertial odometry with NIN (Neural Inertial Navigation) aided. Traditional VIO systems are fragile in challenging scenes with weak or confusing visual information, such as weak/repeated texture, dynamic environment, fast camera motion with serious motion blur, etc. It is extremely difficult for a vision-based algorithm to handle these problems. So we firstly design a robust deep learning based inertial network (called RNIN), using only IMU measurements as input. RNIN is significantly more robust in challenging scenes than traditional VIO systems. In order to take full advantage of vision-based algorithms in AR/VR areas, we further develop a multi-sensor fusion system RNIN-VIO, which tightly couples the visual, IMU and NIN measurements. Our system performs robustly in extremely challenging conditions, with high precision both in trajectories and AR effects. The experimental results of evaluation on dataset evaluation and online AR demo demonstrate the superiority of the proposed system in robustness and accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we propose a tightly-coupled EKF framework for visual-inertial odometry with NIN (Neural Inertial Navigation) aided. Traditional VIO systems are fragile in challenging scenes with weak or confusing visual information, such as weak/repeated texture, dynamic environment, fast camera motion with serious motion blur, etc. It is extremely difficult for a vision-based algorithm to handle these problems. So we firstly design a robust deep learning based inertial network (called RNIN), using only IMU measurements as input. RNIN is significantly more robust in challenging scenes than traditional VIO systems. In order to take full advantage of vision-based algorithms in AR/VR areas, we further develop a multi-sensor fusion system RNIN-VIO, which tightly couples the visual, IMU and NIN measurements. Our system performs robustly in extremely challenging conditions, with high precision both in trajectories and AR effects. The experimental results of evaluation on dataset evaluation and online AR demo demonstrate the superiority of the proposed system in robustness and accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we propose a tightly-coupled EKF framework for visual-inertial odometry with NIN (Neural Inertial Navigation) aided. Traditional VIO systems are fragile in challenging scenes with weak or confusing visual information, such as weak/repeated texture, dynamic environment, fast camera motion with serious motion blur, etc. It is extremely difficult for a vision-based algorithm to handle these problems. So we firstly design a robust deep learning based inertial network (called RNIN), using only IMU measurements as input. RNIN is significantly more robust in challenging scenes than traditional VIO systems. In order to take full advantage of vision-based algorithms in AR/VR areas, we further develop a multi-sensor fusion system RNIN-VIO, which tightly couples the visual, IMU and NIN measurements. Our system performs robustly in extremely challenging conditions, with high precision both in trajectories and AR effects. The experimental results of evaluation on dataset evaluation and online AR demo demonstrate the superiority of the proposed system in robustness and accuracy.",
"fno": "015800a275",
"keywords": [
"Data Visualisation",
"Deep Learning Artificial Intelligence",
"Inertial Navigation",
"Robust Control",
"Sensor Fusion",
"Trajectory Control",
"NIN",
"VIO Systems",
"Confusing Visual Information",
"Serious Motion Blur",
"Vision Based Algorithm",
"Robust Deep Learning",
"Inertial Network",
"Multisensor Fusion System RNIN VIO",
"Visual IMU",
"Robust Neural Inertial Navigation Aided Visual Inertial Odometry",
"EKF Framework",
"Trajectories AR Effects",
"Visual Information",
"Deep Learning",
"Visualization",
"Fuses",
"Heuristic Algorithms",
"Neural Networks",
"Dynamics",
"Inertial Navigation",
"VIO",
"SLAM",
"INS",
"IMU",
"AR 6 Do F",
"Motion Tracking"
],
"authors": [
{
"affiliation": "Zhejiang University,State Key Lab of CAD & CG",
"fullName": "Danpeng Chen",
"givenName": "Danpeng",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SenseTime Research and Tetras. AI",
"fullName": "Nan Wang",
"givenName": "Nan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,State Key Lab of CAD & CG",
"fullName": "Runsen Xu",
"givenName": "Runsen",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,State Key Lab of CAD & CG",
"fullName": "Weijian Xie",
"givenName": "Weijian",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,State Key Lab of CAD & CG",
"fullName": "Hujun Bao",
"givenName": "Hujun",
"surname": "Bao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University,State Key Lab of CAD & CG",
"fullName": "Guofeng Zhang",
"givenName": "Guofeng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "275-283",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeCVjeKu52",
"name": "pismar202101580-09583805s1-mm_015800a275.zip",
"size": "21.2 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202101580-09583805s1-mm_015800a275.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "015800a265",
"articleId": "1yeCTWHYvxS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a284",
"articleId": "1yeCXRQgdc4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/m2vip/1997/8025/0/80250169",
"title": "Inertial navigation aided with GPS information",
"doi": null,
"abstractUrl": "/proceedings-article/m2vip/1997/80250169/12OmNB06l37",
"parentPublication": {
"id": "proceedings/m2vip/1997/8025/0",
"title": "Mechatronics and Machine Vision in Practice, Annual Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a287",
"title": "HybVIO: Pushing the Limits of Real-time Visual-inertial Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a287/1B12YsEjrG0",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a624",
"title": "Moving Visual-Inertial Ordometry into Cross-platform Web for Markerless Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a624/1CJdePZnsfS",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g594",
"title": "Neural Inertial Localization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g594/1H0LyLa9bGM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/04/08937008",
"title": "Deep Neural Network Based Inertial Odometry Using Low-Cost Inertial Measurement Units",
"doi": null,
"abstractUrl": "/journal/tm/2021/04/08937008/1fTdV3t6L9m",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a366",
"title": "NEAR: The NetEase AR Oriented Visual Inertial Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a366/1gysjSArEsM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08691513",
"title": "Unsupervised Deep Visual-Inertial Odometry with Online Error Correction for RGB-D Imagery",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08691513/1jeCTblwCMo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2020/9891/0/09108681",
"title": "Towards End-to-end Learning of Visual Inertial Odometry with an EKF",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2020/09108681/1kpIF0G0Ryg",
"parentPublication": {
"id": "proceedings/crv/2020/9891/0",
"title": "2020 17th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpbd&is/2020/6512/0/09130571",
"title": "PLS-VIO: Stereo Vision-inertial Odometry Based on Point and Line Features",
"doi": null,
"abstractUrl": "/proceedings-article/hpbd&is/2020/09130571/1l6SQmZTPMY",
"parentPublication": {
"id": "proceedings/hpbd&is/2020/6512/0",
"title": "2020 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a193",
"title": "DVIO: Depth-Aided Visual Inertial Odometry for RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a193/1yeD3rmluF2",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeD3rmluF2",
"doi": "10.1109/ISMAR52148.2021.00034",
"title": "DVIO: Depth-Aided Visual Inertial Odometry for RGBD Sensors",
"normalizedTitle": "DVIO: Depth-Aided Visual Inertial Odometry for RGBD Sensors",
"abstract": "In past few years we have observed an increase in the usage of RGBD sensors in mobile devices. These sensors provide a good estimate of the depth map for the camera frame, which can be used in numerous augmented reality applications. This paper presents a new visual inertial odometry (VIO) system, which uses measurements from a RGBD sensor and an inertial measurement unit (IMU) sensor for estimating the motion state of the mobile device. The resulting system is called the depth-aided VIO (DVIO) system. In this system we add the depth measurement as part of the nonlinear optimization process. Specifically, we propose methods to use the depth measurement using one-dimensional (1D) feature parameterization as well as three-dimensional (3D) feature parameterization. In addition, we propose to utilize the depth measurement for estimating time offset between the unsynchronized IMU and the RGBD sensors. Last but not least, we propose a novel block-based marginalization approach to speed up the marginalization processes and maintain the real-time performance of the overall system. Experimental results validate that the proposed DVIO system outperforms the other state-of-the-art VIO systems in terms of trajectory accuracy as well as processing time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In past few years we have observed an increase in the usage of RGBD sensors in mobile devices. These sensors provide a good estimate of the depth map for the camera frame, which can be used in numerous augmented reality applications. This paper presents a new visual inertial odometry (VIO) system, which uses measurements from a RGBD sensor and an inertial measurement unit (IMU) sensor for estimating the motion state of the mobile device. The resulting system is called the depth-aided VIO (DVIO) system. In this system we add the depth measurement as part of the nonlinear optimization process. Specifically, we propose methods to use the depth measurement using one-dimensional (1D) feature parameterization as well as three-dimensional (3D) feature parameterization. In addition, we propose to utilize the depth measurement for estimating time offset between the unsynchronized IMU and the RGBD sensors. Last but not least, we propose a novel block-based marginalization approach to speed up the marginalization processes and maintain the real-time performance of the overall system. Experimental results validate that the proposed DVIO system outperforms the other state-of-the-art VIO systems in terms of trajectory accuracy as well as processing time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In past few years we have observed an increase in the usage of RGBD sensors in mobile devices. These sensors provide a good estimate of the depth map for the camera frame, which can be used in numerous augmented reality applications. This paper presents a new visual inertial odometry (VIO) system, which uses measurements from a RGBD sensor and an inertial measurement unit (IMU) sensor for estimating the motion state of the mobile device. The resulting system is called the depth-aided VIO (DVIO) system. In this system we add the depth measurement as part of the nonlinear optimization process. Specifically, we propose methods to use the depth measurement using one-dimensional (1D) feature parameterization as well as three-dimensional (3D) feature parameterization. In addition, we propose to utilize the depth measurement for estimating time offset between the unsynchronized IMU and the RGBD sensors. Last but not least, we propose a novel block-based marginalization approach to speed up the marginalization processes and maintain the real-time performance of the overall system. Experimental results validate that the proposed DVIO system outperforms the other state-of-the-art VIO systems in terms of trajectory accuracy as well as processing time.",
"fno": "015800a193",
"keywords": [
"Augmented Reality",
"Cameras",
"Distance Measurement",
"Image Sensors",
"Inertial Navigation",
"Mobile Robots",
"Optimisation",
"SLAM Robots",
"Mobile Device",
"Depth Aided VIO System",
"Depth Measurement",
"RGBD Sensor",
"DVIO System",
"State Of The Art VIO Systems",
"Depth Aided Visual Inertial Odometry",
"Depth Map",
"Numerous Augmented Reality Applications",
"Visual Inertial Odometry System",
"Inertial Measurement Unit Sensor",
"Visualization",
"Three Dimensional Displays",
"Runtime",
"Sensor Systems",
"Time Measurement",
"Mobile Handsets",
"Sensors",
"VIO",
"Localization",
"Marginalization",
"RGBD Sensor",
"IMU Sensor",
"SLAM",
"Sliding Window",
"Nonlinear Optimization",
"3 D Reconstruction"
],
"authors": [
{
"affiliation": "Samsung Semiconductor, Inc.,SOC R&D",
"fullName": "Abhishek Tyagi",
"givenName": "Abhishek",
"surname": "Tyagi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung Semiconductor, Inc.,SOC R&D",
"fullName": "Yangwen Liang",
"givenName": "Yangwen",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung Semiconductor, Inc.,SOC R&D",
"fullName": "Shuangquan Wang",
"givenName": "Shuangquan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung Semiconductor, Inc.,SOC R&D",
"fullName": "Dongwoon Bai",
"givenName": "Dongwoon",
"surname": "Bai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "193-201",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeD3b9vucE",
"name": "pismar202101580-09583782s1-mm_015800a193.zip",
"size": "44.3 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202101580-09583782s1-mm_015800a193.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "015800a184",
"articleId": "1yeCXhKVTXy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a202",
"articleId": "1yeD3LfYbXq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2011/0529/0/05981680",
"title": "A real-time system for 3D recovery of dynamic scene with multiple RGBD imagers",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981680/12OmNAndioT",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892244",
"title": "Sweeping-based volumetric calibration and registration of multiple RGBD-sensors for 3D capturing systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892244/12OmNwlHSSf",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131731",
"title": "Volumetric calibration and registration of multiple RGBD-sensors into a joint coordinate system",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131731/12OmNyqRnac",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223340",
"title": "Volumetric calibration and registration of RGBD-sensors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223340/12OmNzAoi07",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0705",
"title": "DepthTrack: Unveiling the Power of RGBD Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0705/1BmK0oMbvk4",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2020/9891/0/09108681",
"title": "Towards End-to-end Learning of Visual Inertial Odometry with an EKF",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2020/09108681/1kpIF0G0Ryg",
"parentPublication": {
"id": "proceedings/crv/2020/9891/0",
"title": "2020 17th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i333",
"title": "RGBD-Dog: Predicting Canine Pose from RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i333/1m3npw1SCKA",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a275",
"title": "RNIN-VIO: Robust Neural Inertial Navigation Aided Visual-Inertial Odometry in Challenging Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a275/1yeCVsvyctG",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f742",
"title": "Function4D: Real-time Human Volumetric Capture from Very Sparse Consumer RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f742/1yeJFKObhAY",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b095",
"title": "RGBD-Net: Predicting Color and Depth Images for Novel Views Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b095/1zWEdBgaMF2",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyNQSGO",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAJVcCL",
"doi": "10.1109/CVPR.2007.383465",
"title": "Focal Pre-Correction of Projected Image for Deblurring Screen Image",
"normalizedTitle": "Focal Pre-Correction of Projected Image for Deblurring Screen Image",
"abstract": "We propose a method for reducing out-of-focus blur caused by projector projection. In this method, we estimate the Point-Spread-Function (PSF) of the out-of-focus blur in the image projected onto the screen by comparing the screen image captured by a camera with the original image projected by the projector. According to the estimated PSF, the projected image is pre-corrected, so that the screen image can be deblurred. Experimental results show that our method can reduce out-of-focus projection blur.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a method for reducing out-of-focus blur caused by projector projection. In this method, we estimate the Point-Spread-Function (PSF) of the out-of-focus blur in the image projected onto the screen by comparing the screen image captured by a camera with the original image projected by the projector. According to the estimated PSF, the projected image is pre-corrected, so that the screen image can be deblurred. Experimental results show that our method can reduce out-of-focus projection blur.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a method for reducing out-of-focus blur caused by projector projection. In this method, we estimate the Point-Spread-Function (PSF) of the out-of-focus blur in the image projected onto the screen by comparing the screen image captured by a camera with the original image projected by the projector. According to the estimated PSF, the projected image is pre-corrected, so that the screen image can be deblurred. Experimental results show that our method can reduce out-of-focus projection blur.",
"fno": "04270463",
"keywords": [],
"authors": [
{
"affiliation": "Graduate School of Science and Technology, Keio University, 3-14-1 Hiyoshi Kohoku-ku, Yokohama 223-8",
"fullName": "Yuji Oyamada",
"givenName": "Yuji",
"surname": "Oyamada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Science and Technology, Keio University, 3-14-1 Hiyoshi Kohoku-ku, Yokohama 223-8",
"fullName": "Hideo Saito",
"givenName": "Hideo",
"surname": "Saito",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2007",
"issn": null,
"isbn": "1-4244-1179-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04270462",
"articleId": "12OmNwCJOUc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04270464",
"articleId": "12OmNBQkx7b",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-graphics/2013/2576/0/06815048",
"title": "Projector-Screen Matching Image Generation Technology for Spatial Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815048/12OmNBKmXsf",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2006/2616/2/261610705",
"title": "An Edge-Driven Total Variation Approach to Image Deblurring and Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2006/261610705/12OmNBTawxD",
"parentPublication": {
"id": "proceedings/icicic/2006/2616/3",
"title": "Innovative Computing ,Information and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2012/1662/0/06215220",
"title": "Depth-aware motion deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2012/06215220/12OmNBaBuUP",
"parentPublication": {
"id": "proceedings/iccp/2012/1662/0",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206685",
"title": "Coded exposure deblurring: Optimized codes for PSF estimation and invertibility",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206685/12OmNs59JMc",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05539935",
"title": "Coded exposure imaging for projective motion deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05539935/12OmNwbLVjY",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2003/1900/1/190010657",
"title": "Motion Deblurring Using Hybrid Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2003/190010657/12OmNx6xHse",
"parentPublication": {
"id": "proceedings/cvpr/2003/1900/1",
"title": "2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/004P1A04",
"title": "Nonlinear camera response functions and image deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/004P1A04/12OmNxu6pcD",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890123",
"title": "Sensor-assisted image deblurring of consumer photos on smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890123/12OmNzkuKyL",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/10/ttp2013102498",
"title": "Nonlinear Camera Response Functions and Image Deblurring: Theoretical Analysis and Practice",
"doi": null,
"abstractUrl": "/journal/tp/2013/10/ttp2013102498/13rRUxDqS5i",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tst/2021/2903/0/290300a036",
"title": "A Nash-game approach to Blind Image Deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/tst/2021/290300a036/1wpuxqhHjZS",
"parentPublication": {
"id": "proceedings/tst/2021/2903/0",
"title": "2021 Third International Conference on Transportation and Smart Technologies (TST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBp52Hx",
"doi": "10.1109/ICPR.2008.4761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"normalizedTitle": "Calibration of projector-camera systems from virtual mutual projection",
"abstract": "In this paper, we propose a new calibration method for projector-camera systems. The projector-camera systems have been studied extensively as one of new information presenting systems. For calibrating the systems, we in this paper use virtual mutual projections between a camera and a projector. We first show that by using a shadow of the camera, we can generate mutual projection between a camera and a projector virtually. By using the mutual projection, the projector camera system can be calibrated more stably than the existing calibration methods. Some experimental results show that the mutual projections work quite well for calibrating projector-camera systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a new calibration method for projector-camera systems. The projector-camera systems have been studied extensively as one of new information presenting systems. For calibrating the systems, we in this paper use virtual mutual projections between a camera and a projector. We first show that by using a shadow of the camera, we can generate mutual projection between a camera and a projector virtually. By using the mutual projection, the projector camera system can be calibrated more stably than the existing calibration methods. Some experimental results show that the mutual projections work quite well for calibrating projector-camera systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a new calibration method for projector-camera systems. The projector-camera systems have been studied extensively as one of new information presenting systems. For calibrating the systems, we in this paper use virtual mutual projections between a camera and a projector. We first show that by using a shadow of the camera, we can generate mutual projection between a camera and a projector virtually. By using the mutual projection, the projector camera system can be calibrated more stably than the existing calibration methods. Some experimental results show that the mutual projections work quite well for calibrating projector-camera systems.",
"fno": "04761601",
"keywords": [
"Calibration",
"Cameras",
"Calibration",
"Projector Camera Systems",
"Virtual Mutual Projection",
"Calibration",
"Cameras",
"Transmission Line Matrix Methods",
"Equations",
"Geometry"
],
"authors": [
{
"affiliation": "Nagoya Institute of Technology, Gokiso, Showa, 466-8555, Japan",
"fullName": "Fumihiko Sakaue",
"givenName": "Fumihiko",
"surname": "Sakaue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nagoya Institute of Technology, Gokiso, Showa, 466-8555, Japan",
"fullName": "Jun Sato",
"givenName": "Jun",
"surname": "Sato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761600",
"articleId": "12OmNCgJece",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761602",
"articleId": "12OmNzcxZ4I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2011/0529/0/05981781",
"title": "Simultaneous self-calibration of a projector and a camera using structured light",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270466",
"title": "Photometric Self-Calibration of a Projector-Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270466/12OmNCeK2fW",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciap/2007/2877/0/28770093",
"title": "Calibration and Image Generation of Mobile Projector-Camera Systems",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2007/28770093/12OmNwtWfIL",
"parentPublication": {
"id": "proceedings/iciap/2007/2877/0",
"title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a464",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2001/1143/1/00937525",
"title": "Smarter presentations: exploiting homography in camera-projector systems",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937525/12OmNxwncaw",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a320",
"title": "Active Calibration of Camera-Projector Systems Based on Planar Homography",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07164353",
"title": "On-Site Semi-Automatic Calibration and Registration of a Projector-Camera System Using Arbitrary Objects with Known Geometry",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07164353/13rRUEgs2M6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2005/12/i1845",
"title": "Autocalibration of a Projector-Camera System",
"doi": null,
"abstractUrl": "/journal/tp/2005/12/i1845/13rRUxASuiM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a261",
"title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBpVQ2Y",
"doi": "",
"title": "Coded aperture for projector and camera for robust 3D measurement",
"normalizedTitle": "Coded aperture for projector and camera for robust 3D measurement",
"abstract": "General active 3D measurement system using structured light is based on triangulation, which requires correspondence between projection pattern and camera observed pattern. Since both the projected pattern and the camera image should be in focus on the target, the condition makes a severe limitation on depth range of 3D measurement. In this paper, we propose a technique using coded aperture (CA) for projector and camera system to relax the limitation. In our method, Depth from Defocus (DfD) technique is used to resolve the de-focus of projected pattern. By allowing blurry pattern of projection, measurement range is extended compared to common structured light methods. Further, overlapped blur pattern can also be resolved with our technique.",
"abstracts": [
{
"abstractType": "Regular",
"content": "General active 3D measurement system using structured light is based on triangulation, which requires correspondence between projection pattern and camera observed pattern. Since both the projected pattern and the camera image should be in focus on the target, the condition makes a severe limitation on depth range of 3D measurement. In this paper, we propose a technique using coded aperture (CA) for projector and camera system to relax the limitation. In our method, Depth from Defocus (DfD) technique is used to resolve the de-focus of projected pattern. By allowing blurry pattern of projection, measurement range is extended compared to common structured light methods. Further, overlapped blur pattern can also be resolved with our technique.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "General active 3D measurement system using structured light is based on triangulation, which requires correspondence between projection pattern and camera observed pattern. Since both the projected pattern and the camera image should be in focus on the target, the condition makes a severe limitation on depth range of 3D measurement. In this paper, we propose a technique using coded aperture (CA) for projector and camera system to relax the limitation. In our method, Depth from Defocus (DfD) technique is used to resolve the de-focus of projected pattern. By allowing blurry pattern of projection, measurement range is extended compared to common structured light methods. Further, overlapped blur pattern can also be resolved with our technique.",
"fno": "06460424",
"keywords": [
"Cameras",
"Computerised Instrumentation",
"Image Restoration",
"Measurement Systems",
"Coded Aperture",
"3 D Measurement System",
"Structured Light",
"Triangulation",
"Projection Pattern",
"Camera Observed Pattern",
"Camera Image",
"Measurement Depth Range",
"CA",
"Df D Technique",
"Depth From Defocus Technique",
"Projection Blurry Pattern",
"Structured Light Method",
"Cameras",
"Deconvolution",
"Shape",
"Light Sources",
"Apertures",
"Image Reconstruction",
"Biomedical Measurements"
],
"authors": [
{
"affiliation": "Department of Information Science and Biomedical Engineering, Faculty of Engineering, Kagoshima University",
"fullName": "Yuuki Horita",
"givenName": "Yuuki",
"surname": "Horita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Information Science and Biomedical Engineering, Faculty of Engineering, Kagoshima University",
"fullName": "Yuuki Matugano",
"givenName": "Yuuki",
"surname": "Matugano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Information Science and Biomedical Engineering, Faculty of Engineering, Kagoshima University",
"fullName": "Hiroki Morinaga",
"givenName": "Hiroki",
"surname": "Morinaga",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Information Science and Biomedical Engineering, Faculty of Engineering, Kagoshima University",
"fullName": "Hiroshi Kawasaki",
"givenName": "Hiroshi",
"surname": "Kawasaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Information Science and Biomedical Engineering, Faculty of Engineering, Kagoshima University",
"fullName": "Satoshi Ono",
"givenName": "Satoshi",
"surname": "Ono",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung Yokohama Research Institute Co., Ltd.",
"fullName": "Makoto Kimura",
"givenName": "Makoto",
"surname": "Kimura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung Yokohama Research Institute Co., Ltd.",
"fullName": "Yasuo Takane",
"givenName": "Yasuo",
"surname": "Takane",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1487-1491",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460423",
"articleId": "12OmNzZEAx3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460425",
"articleId": "12OmNzZEApJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2011/0529/0/05981787",
"title": "Surface depth computation and representation from multiple coded projector light",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981787/12OmNBWi6Gz",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a910",
"title": "Depth Camera Based on Color-Coded Aperture",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a910/12OmNvm6VHm",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206614",
"title": "A projector-camera setup for geometry-invariant frequency demultiplexing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206614/12OmNvoWV1H",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d596",
"title": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d596/12OmNwpGgNQ",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270475",
"title": "Projector Calibration using Arbitrary Planes and Calibrated Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270475/12OmNxYtu7r",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d568",
"title": "Active One-Shot Scan for Wide Depth Range Using a Light Field Projector Based on Coded Aperture",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d568/12OmNxdm4Cp",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c672",
"title": "Time-Multiplexed Coded Aperture Imaging: Learned Coded Aperture and Pixel Exposures for Compressive Imaging Systems",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c672/1BmLc0RAlck",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09930626",
"title": "A Monocular Projector-Camera System using Modular Architecture",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09930626/1HMOYkaK9Ww",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798245",
"title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798245/1cI6ar8DdyE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyS6RMH",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"acronym": "crv",
"groupId": "1001794",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs59JP3",
"doi": "10.1109/CRV.2016.62",
"title": "Blur Calibration for Depth from Defocus",
"normalizedTitle": "Blur Calibration for Depth from Defocus",
"abstract": "Depth from defocus based methods rely on measuring the depth dependent blur at each pixel of the image. A core component in the defocus blur estimation process is the depth variant blur kernel. This blur kernel is often approximated as a Gaussian or pillbox kernel which only works well for small amount of blur. In general the blur kernel depends on the shape of the aperture and can vary a lot with depth. For more accurate blur estimation it is necessary to precisely model the blur kernel. In this paper we present a simple and accurate approach for performing blur kernel calibration for depth from defocus. We also show how to estimate the relative blur kernel from a pair of defocused blur kernels. Our proposed approach can estimate blurs ranging from small (single pixel) to sufficiently large (e.g. 77 x 77 in our experiments). We also experimentally demonstrate that our relative blur estimation method can recover blur kernels for complex asymmetric coded apertures which has not been shown before.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Depth from defocus based methods rely on measuring the depth dependent blur at each pixel of the image. A core component in the defocus blur estimation process is the depth variant blur kernel. This blur kernel is often approximated as a Gaussian or pillbox kernel which only works well for small amount of blur. In general the blur kernel depends on the shape of the aperture and can vary a lot with depth. For more accurate blur estimation it is necessary to precisely model the blur kernel. In this paper we present a simple and accurate approach for performing blur kernel calibration for depth from defocus. We also show how to estimate the relative blur kernel from a pair of defocused blur kernels. Our proposed approach can estimate blurs ranging from small (single pixel) to sufficiently large (e.g. 77 x 77 in our experiments). We also experimentally demonstrate that our relative blur estimation method can recover blur kernels for complex asymmetric coded apertures which has not been shown before.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Depth from defocus based methods rely on measuring the depth dependent blur at each pixel of the image. A core component in the defocus blur estimation process is the depth variant blur kernel. This blur kernel is often approximated as a Gaussian or pillbox kernel which only works well for small amount of blur. In general the blur kernel depends on the shape of the aperture and can vary a lot with depth. For more accurate blur estimation it is necessary to precisely model the blur kernel. In this paper we present a simple and accurate approach for performing blur kernel calibration for depth from defocus. We also show how to estimate the relative blur kernel from a pair of defocused blur kernels. Our proposed approach can estimate blurs ranging from small (single pixel) to sufficiently large (e.g. 77 x 77 in our experiments). We also experimentally demonstrate that our relative blur estimation method can recover blur kernels for complex asymmetric coded apertures which has not been shown before.",
"fno": "2491a281",
"keywords": [
"Kernel",
"Estimation",
"Lenses",
"Calibration",
"Cameras",
"Apertures",
"Shape",
"Optimization",
"Depth From Defocus",
"Point Spread Functions",
"Relative Blur"
],
"authors": [
{
"affiliation": null,
"fullName": "Fahim Mannan",
"givenName": "Fahim",
"surname": "Mannan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michael S. Langer",
"givenName": "Michael S.",
"surname": "Langer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "crv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "281-288",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2491-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2491a273",
"articleId": "12OmNvCzFe8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2491a289",
"articleId": "12OmNzaQo9L",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccms/2010/5642/2/05421108",
"title": "Depth Recovery from Defocus Images Using Total Variation",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/05421108/12OmNBbaH6r",
"parentPublication": {
"id": "proceedings/iccms/2010/5642/2",
"title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761514",
"title": "Depth recovery using defocus blur at infinity",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761514/12OmNBkxstu",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a326",
"title": "Optimal Camera Parameters for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a326/12OmNC8MsHb",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a592",
"title": "Discriminative Filters for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a592/12OmNCmpcHL",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a855",
"title": "Blur-Aware Disparity Estimation from Defocus Stereo Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a855/12OmNqGA51a",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a273",
"title": "What is a Good Model for Depth from Defocus?",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a273/12OmNvCzFe8",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032f381",
"title": "Estimating Defocus Blur via Rank of Local Patches",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f381/12OmNvEQseS",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457e773",
"title": "Depth from Defocus in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457e773/12OmNx5Yvot",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a370",
"title": "Video Depth-from-Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a370/12OmNy5hRoj",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2001/1143/1/00937556",
"title": "Depth from defocus in presence of partial self occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937556/12OmNzyYib1",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy4r3R2",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxzMnPB",
"doi": "10.1109/CVPR.2009.5206622",
"title": "Catadioptric projectors",
"normalizedTitle": "Catadioptric projectors",
"abstract": "We present a catadioptric projector analogous to a catadioptric camera by combining a commodity digital projector with additional optical units. We show that, by using specially shaped reflectors/refractors, catadioptric projectors can offer an unprecedented level of flexibility in aspect ratio, size, and field of view. We also present efficient algorithms to reduce projection artifacts in catadioptric projectors, such as distortions, scattering, and defocusing. Instead of recovering the reflector/refractor geometry, our approach directly models the light transport between the projector and the viewpoint using the light transport matrix (LTM). We show how to efficiently approximate the pseudo inverse of the LTM and apply it to find the optimal input image that produces least projection distortions. Furthermore, we present a projection defocus analysis for reflector and thin refractor based catadioptric projectors. We show that defocus blur can be interpreted as spatially-varying Gaussian blurs on the input image. We then measure the kernels directly from the LTM and apply deconvolution to optimize the input image. We demonstrate the practical uses of catadioptric projectors in panoramic and omni-directional projections. Our new system achieves much wider field-of-view projection while maintaining sharpness and low geometric and photometric distortions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a catadioptric projector analogous to a catadioptric camera by combining a commodity digital projector with additional optical units. We show that, by using specially shaped reflectors/refractors, catadioptric projectors can offer an unprecedented level of flexibility in aspect ratio, size, and field of view. We also present efficient algorithms to reduce projection artifacts in catadioptric projectors, such as distortions, scattering, and defocusing. Instead of recovering the reflector/refractor geometry, our approach directly models the light transport between the projector and the viewpoint using the light transport matrix (LTM). We show how to efficiently approximate the pseudo inverse of the LTM and apply it to find the optimal input image that produces least projection distortions. Furthermore, we present a projection defocus analysis for reflector and thin refractor based catadioptric projectors. We show that defocus blur can be interpreted as spatially-varying Gaussian blurs on the input image. We then measure the kernels directly from the LTM and apply deconvolution to optimize the input image. We demonstrate the practical uses of catadioptric projectors in panoramic and omni-directional projections. Our new system achieves much wider field-of-view projection while maintaining sharpness and low geometric and photometric distortions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a catadioptric projector analogous to a catadioptric camera by combining a commodity digital projector with additional optical units. We show that, by using specially shaped reflectors/refractors, catadioptric projectors can offer an unprecedented level of flexibility in aspect ratio, size, and field of view. We also present efficient algorithms to reduce projection artifacts in catadioptric projectors, such as distortions, scattering, and defocusing. Instead of recovering the reflector/refractor geometry, our approach directly models the light transport between the projector and the viewpoint using the light transport matrix (LTM). We show how to efficiently approximate the pseudo inverse of the LTM and apply it to find the optimal input image that produces least projection distortions. Furthermore, we present a projection defocus analysis for reflector and thin refractor based catadioptric projectors. We show that defocus blur can be interpreted as spatially-varying Gaussian blurs on the input image. We then measure the kernels directly from the LTM and apply deconvolution to optimize the input image. We demonstrate the practical uses of catadioptric projectors in panoramic and omni-directional projections. Our new system achieves much wider field-of-view projection while maintaining sharpness and low geometric and photometric distortions.",
"fno": "05206622",
"keywords": [
"Display Instrumentation",
"Light Refraction",
"Matrix Algebra",
"Optical Projectors",
"Catadioptric Camera",
"Digital Projector",
"Optical Units",
"Specially Shaped Reflectors",
"Specially Shaped Refractors",
"Catadioptric Projectors",
"Projection Artifacts",
"Reflector Geometry",
"Refractor Geometry",
"Light Transport Matrix",
"Least Projection Distortion",
"Projection Defocus Analysis",
"Thin Refractor",
"Spatially Varying Gaussian Blurs",
"Deconvolution",
"Panoramic Projection",
"Omnidirectional Projection",
"Field Of View Projection",
"Geometric Distortion",
"Photometric Distortion",
"Optical Refraction",
"Optical Distortion",
"Optical Scattering",
"Digital Cameras",
"Optical Variables Control",
"Light Scattering",
"Geometry",
"Solid Modeling",
"Predistortion",
"Distortion Measurement"
],
"authors": [
{
"affiliation": "University of Delaware, Newark, USA",
"fullName": "Yuanyuan Ding",
"givenName": null,
"surname": "Yuanyuan Ding",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Epson Research and Development Inc, San Jose, CA 95131, USA",
"fullName": "Jing Xiao",
"givenName": "Jing",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hewlett-Packard Company, Palo Alto, CA 94304, USA",
"fullName": "Kar-Han Tan",
"givenName": "Kar-Han",
"surname": "Tan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Delaware, Newark, USA",
"fullName": "Jingyi Yu",
"givenName": null,
"surname": "Jingyi Yu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2528-2535",
"year": "2009",
"issn": "1063-6919",
"isbn": "978-1-4244-3992-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05206621",
"articleId": "12OmNz5JCcX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05206623",
"articleId": "12OmNCwCLtZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dim/2003/1991/0/19910217",
"title": "Multi-projectors for arbitrary surfaces without explicit calibration nor reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/2003/19910217/12OmNAoDhVM",
"parentPublication": {
"id": "proceedings/3dim/2003/1991/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2005/2660/0/237230108",
"title": "Multi-Planar Projection by Fixed-Center Pan-Tilt Projectors",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2005/237230108/12OmNBQC89T",
"parentPublication": {
"id": "proceedings/cvprw/2005/2660/0",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2018/2290/0/08343085",
"title": "A transformation analysis of 3D virtual object for projection mapping",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2018/08343085/12OmNvSbBz8",
"parentPublication": {
"id": "proceedings/icoin/2018/2290/0",
"title": "2018 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836461",
"title": "Mimicking an Object Using Multiple Projectors",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836461/12OmNwE9OV9",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543487",
"title": "Projector optical distortion calibration using Gray code patterns",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543487/12OmNxWcHf2",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587788",
"title": "Automatic calibration of a single-projector catadioptric display system",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587788/12OmNy314cl",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2005/2660/0/237230112",
"title": "Handheld Projectors for Mixing Physical and Digital Textures",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2005/237230112/12OmNz2C1lo",
"parentPublication": {
"id": "proceedings/cvprw/2005/2660/0",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810996",
"title": "A Distributed Cooperative Framework for Continuous Multi-Projector Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810996/12OmNzV70vz",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446433",
"title": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007213",
"title": "Geometric and Photometric Consistency in a Mixed Video and Galvanoscopic Scanning Laser Projection Mapping System",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007213/13rRUxcsYLX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwJPMYe",
"title": "CVPR 2011 WORKSHOPS",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzwHvrs",
"doi": "10.1109/CVPRW.2011.5981686",
"title": "Projection defocus correction using adaptive kernel sampling and geometric correction in dual-planar environments",
"normalizedTitle": "Projection defocus correction using adaptive kernel sampling and geometric correction in dual-planar environments",
"abstract": "Defocus blur correction for projectors using a camera is useful when the projector is used in ad hoc environments. However, past literature has not explicitly considered the common situation when the projection surface includes a corner made up of two planar surfaces that abut each other, such as the ubiquitous office cubicle. In this paper, we advance the state of the art by demonstrating defocus correction in a non-parametric setting. Our method differs from prior methods in that (a) the luminance and chrominance channels are independently considered, and (b) a sparse sampling of the surface is used to discover the spatially varying defocus kernel.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Defocus blur correction for projectors using a camera is useful when the projector is used in ad hoc environments. However, past literature has not explicitly considered the common situation when the projection surface includes a corner made up of two planar surfaces that abut each other, such as the ubiquitous office cubicle. In this paper, we advance the state of the art by demonstrating defocus correction in a non-parametric setting. Our method differs from prior methods in that (a) the luminance and chrominance channels are independently considered, and (b) a sparse sampling of the surface is used to discover the spatially varying defocus kernel.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Defocus blur correction for projectors using a camera is useful when the projector is used in ad hoc environments. However, past literature has not explicitly considered the common situation when the projection surface includes a corner made up of two planar surfaces that abut each other, such as the ubiquitous office cubicle. In this paper, we advance the state of the art by demonstrating defocus correction in a non-parametric setting. Our method differs from prior methods in that (a) the luminance and chrominance channels are independently considered, and (b) a sparse sampling of the surface is used to discover the spatially varying defocus kernel.",
"fno": "05981686",
"keywords": [
"Image Restoration",
"Image Sampling",
"Image Sensors",
"Optical Projectors",
"Projection Defocus Blur Correction",
"Adaptive Kernel Sampling",
"Geometric Correction",
"Ad Hoc Environments",
"Projection Surface",
"Ubiquitous Office Cubicle",
"Luminance Channels",
"Chrominance Channels",
"Sparse Sampling",
"Kernel",
"Cameras",
"Measurement",
"Image Color Analysis",
"Geometry",
"Transmission Line Matrix Methods",
"Lenses"
],
"authors": [
{
"affiliation": "IITB-Monash Research Academy, Indian Institute of Technology Bombay",
"fullName": "Shamsuddin Ladha",
"givenName": "Shamsuddin",
"surname": "Ladha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Mathematical Sciences, Monash University",
"fullName": "Kate Smith-Miles",
"givenName": "Kate",
"surname": "Smith-Miles",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Indian Institute of Technology Bombay",
"fullName": "Sharat Chandran",
"givenName": "Sharat",
"surname": "Chandran",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9-14",
"year": "2011",
"issn": "2160-7508",
"isbn": "978-1-4577-0529-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05981685",
"articleId": "12OmNx1Iw9l",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05981687",
"articleId": "12OmNrIJqD3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a673",
"title": "Depth from Combining Defocus and Correspondence Using Light-Field Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a673/12OmNxETas6",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528303",
"title": "High-rank coded aperture projection for extended depth of field",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528303/12OmNxUMHoq",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/04155780",
"title": "The Reverse Projection Correlation Principle for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/04155780/12OmNxWuidZ",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161017",
"title": "A Personal Surround Environment: Projective Display with Correction for Display Surface Geometry and Extreme Lens Distortion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161017/12OmNy5R3CD",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2014/4337/0/4337a195",
"title": "Camera Matrix Calibration Using Circular Control Points and Separate Correction of the Geometric Distortion Field",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2014/4337a195/12OmNzWx0bU",
"parentPublication": {
"id": "proceedings/crv/2014/4337/0",
"title": "2014 Canadian Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2013/4983/0/4983a210",
"title": "Exploiting Color Constancy for Compensating Projected Images on Non-white Light Projection Screen",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2013/4983a210/12OmNzYwc4C",
"parentPublication": {
"id": "proceedings/crv/2013/4983/0",
"title": "2013 International Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2005/03/i0406",
"title": "A Geometric Approach to Shape from Defocus",
"doi": null,
"abstractUrl": "/journal/tp/2005/03/i0406/13rRUNvyaa9",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/03/07452621",
"title": "Shape Estimation from Shading, Defocus, and Correspondence Using Light-Field Angular Coherence",
"doi": null,
"abstractUrl": "/journal/tp/2017/03/07452621/13rRUxYIN5A",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/04/v0658",
"title": "Multifocal Projection: A Multiprojector Technique for Increasing Focal Depth",
"doi": null,
"abstractUrl": "/journal/tg/2006/04/v0658/13rRUypp57x",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmLnxlmJeE",
"doi": "10.1109/ICCV48922.2021.00264",
"title": "Single Image Defocus Deblurring Using Kernel-Sharing Parallel Atrous Convolutions",
"normalizedTitle": "Single Image Defocus Deblurring Using Kernel-Sharing Parallel Atrous Convolutions",
"abstract": "This paper proposes a novel deep learning approach for single image defocus deblurring based on inverse kernels. In a defocused image, the blur shapes are similar among pixels although the blur sizes can spatially vary. To utilize the property with inverse kernels, we exploit the observation that when only the size of a defocus blur changes while keeping the shape, the shape of the corresponding inverse kernel remains the same and only the scale changes. Based on the observation, we propose a kernel-sharing parallel atrous convolutional (KPAC) block specifically designed by incorporating the property of inverse kernels for single image defocus deblurring. To effectively simulate the invariant shapes of inverse kernels with different scales, KPAC shares the same convolutional weights among multiple atrous convolution layers. To efficiently simulate the varying scales of inverse kernels, KPAC consists of only a few atrous convolution layers with different dilations and learns per-pixel scale attentions to aggregate the outputs of the layers. KPAC also utilizes the shape attention to combine the outputs of multiple convolution filters in each atrous convolution layer, to deal with defocus blur with a slightly varying shape. We demonstrate that our approach achieves state-of-the-art performance with a much smaller number of parameters than previous methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a novel deep learning approach for single image defocus deblurring based on inverse kernels. In a defocused image, the blur shapes are similar among pixels although the blur sizes can spatially vary. To utilize the property with inverse kernels, we exploit the observation that when only the size of a defocus blur changes while keeping the shape, the shape of the corresponding inverse kernel remains the same and only the scale changes. Based on the observation, we propose a kernel-sharing parallel atrous convolutional (KPAC) block specifically designed by incorporating the property of inverse kernels for single image defocus deblurring. To effectively simulate the invariant shapes of inverse kernels with different scales, KPAC shares the same convolutional weights among multiple atrous convolution layers. To efficiently simulate the varying scales of inverse kernels, KPAC consists of only a few atrous convolution layers with different dilations and learns per-pixel scale attentions to aggregate the outputs of the layers. KPAC also utilizes the shape attention to combine the outputs of multiple convolution filters in each atrous convolution layer, to deal with defocus blur with a slightly varying shape. We demonstrate that our approach achieves state-of-the-art performance with a much smaller number of parameters than previous methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a novel deep learning approach for single image defocus deblurring based on inverse kernels. In a defocused image, the blur shapes are similar among pixels although the blur sizes can spatially vary. To utilize the property with inverse kernels, we exploit the observation that when only the size of a defocus blur changes while keeping the shape, the shape of the corresponding inverse kernel remains the same and only the scale changes. Based on the observation, we propose a kernel-sharing parallel atrous convolutional (KPAC) block specifically designed by incorporating the property of inverse kernels for single image defocus deblurring. To effectively simulate the invariant shapes of inverse kernels with different scales, KPAC shares the same convolutional weights among multiple atrous convolution layers. To efficiently simulate the varying scales of inverse kernels, KPAC consists of only a few atrous convolution layers with different dilations and learns per-pixel scale attentions to aggregate the outputs of the layers. KPAC also utilizes the shape attention to combine the outputs of multiple convolution filters in each atrous convolution layer, to deal with defocus blur with a slightly varying shape. We demonstrate that our approach achieves state-of-the-art performance with a much smaller number of parameters than previous methods.",
"fno": "281200c622",
"keywords": [
"Deep Learning",
"Computer Vision",
"Shape",
"Convolution",
"Aggregates",
"Kernel",
"Computational Photography"
],
"authors": [
{
"affiliation": "POSTECH",
"fullName": "Hyeongseok Son",
"givenName": "Hyeongseok",
"surname": "Son",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "POSTECH",
"fullName": "Junyong Lee",
"givenName": "Junyong",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "POSTECH",
"fullName": "Sunghyun Cho",
"givenName": "Sunghyun",
"surname": "Cho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "POSTECH",
"fullName": "Seungyong Lee",
"givenName": "Seungyong",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2622-2630",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200c612",
"articleId": "1BmKyeIB4LS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200c631",
"articleId": "1BmFxzJc1tC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2016/5407/0/5407a592",
"title": "Discriminative Filters for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a592/12OmNCmpcHL",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a828",
"title": "Accurate Camera Calibration Robust to Defocus Using a Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a828/12OmNqGA4Zd",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a855",
"title": "Blur-Aware Disparity Estimation from Defocus Stereo Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a855/12OmNqGA51a",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a281",
"title": "Blur Calibration for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a281/12OmNs59JP3",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a273",
"title": "What is a Good Model for Depth from Defocus?",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a273/12OmNvCzFe8",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032f381",
"title": "Estimating Defocus Blur via Rank of Local Patches",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f381/12OmNvEQseS",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c885",
"title": "Separable Kernel for Image Deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c885/12OmNy4r3TR",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/03/ttp2008030518",
"title": "Shape from Defocus via Diffusion",
"doi": null,
"abstractUrl": "/journal/tp/2008/03/ttp2008030518/13rRUygT7o9",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a436",
"title": "Blind Non-Uniform Motion Deblurring using Atrous Spatial Pyramid Deformable Convolution and Deblurring-Reblurring Consistency",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a436/1G56QToOMlq",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c034",
"title": "Iterative Filter Adaptive Network for Single Image Defocus Deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c034/1yeLtxpVVvO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrNh0vs",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"acronym": "icat",
"groupId": "1001485",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvjyxGc",
"doi": "10.1109/ICAT.2013.6728916",
"title": "Countercurrent enhances acceleration sensation in galvanic vestibular stimulation",
"normalizedTitle": "Countercurrent enhances acceleration sensation in galvanic vestibular stimulation",
"abstract": "This study reports that the effect of galvanic vestibular stimulation (GVS) can be enhanced by giving a countercurrent before the normal current stimulation in the forward direction. In order to investigate the effect of the countercurrent on GVS, we applied various kinds of amplitudes and durations of the countercurrent before the normal stimulation. The strength of the effect was measured by the subjective response and body sway. As a result, the enhancing effect by the countercurrent does not only appear in the objective response but also in the subjective reports and we found that the effect of the countercurrent was enhanced in response to the amount of charges of the countercurrent before normal stimulations. Our result implies that there is a capacitor on the current path that enhances the GVS effect by the countercurrent.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study reports that the effect of galvanic vestibular stimulation (GVS) can be enhanced by giving a countercurrent before the normal current stimulation in the forward direction. In order to investigate the effect of the countercurrent on GVS, we applied various kinds of amplitudes and durations of the countercurrent before the normal stimulation. The strength of the effect was measured by the subjective response and body sway. As a result, the enhancing effect by the countercurrent does not only appear in the objective response but also in the subjective reports and we found that the effect of the countercurrent was enhanced in response to the amount of charges of the countercurrent before normal stimulations. Our result implies that there is a capacitor on the current path that enhances the GVS effect by the countercurrent.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study reports that the effect of galvanic vestibular stimulation (GVS) can be enhanced by giving a countercurrent before the normal current stimulation in the forward direction. In order to investigate the effect of the countercurrent on GVS, we applied various kinds of amplitudes and durations of the countercurrent before the normal stimulation. The strength of the effect was measured by the subjective response and body sway. As a result, the enhancing effect by the countercurrent does not only appear in the objective response but also in the subjective reports and we found that the effect of the countercurrent was enhanced in response to the amount of charges of the countercurrent before normal stimulations. Our result implies that there is a capacitor on the current path that enhances the GVS effect by the countercurrent.",
"fno": "06728916",
"keywords": [
"Acceleration",
"Electrodes",
"Capacitors",
"Current Measurement",
"Ear",
"Integrated Circuits",
"Educational Institutions",
"Countercurrent Stimulation",
"Galvanic Vestibular Stimulus",
"Sway Of Standing"
],
"authors": [
{
"affiliation": "Grad. Sch. of Inf. Sci. & Technol., Osaka Univ., Suita, Japan",
"fullName": "Kazuma Aoyama",
"givenName": "Kazuma",
"surname": "Aoyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grad. Sch. of Inf. Sci. & Technol., Osaka Univ., Suita, Japan",
"fullName": "Hiroyuki Lizuka",
"givenName": "Hiroyuki",
"surname": "Lizuka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grad. Sch. of Inf. Sci. & Technol., Osaka Univ., Suita, Japan",
"fullName": "Hideyuki Ando",
"givenName": "Hideyuki",
"surname": "Ando",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grad. Sch. of Inf. Sci. & Technol., Osaka Univ., Suita, Japan",
"fullName": "Taro Maeda",
"givenName": "Taro",
"surname": "Maeda",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icat",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "116-121",
"year": "2013",
"issn": null,
"isbn": "978-4-904490-11-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06728915",
"articleId": "12OmNx2zjvN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06728917",
"articleId": "12OmNC8dgkV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2005/8929/0/01492799",
"title": "Virtual acceleration with galvanic vestibular stimulation in a virtual reality environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492799/12OmNwJPMZr",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2015/7143/0/7143a969",
"title": "Sensation Feedback and Muscle Response of Electrical Stimulation on the Upper Limb Skin: A Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2015/7143a969/12OmNxxdZNf",
"parentPublication": {
"id": "proceedings/icmtma/2015/7143/0",
"title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446403",
"title": "Effect of Electrical Stimulation Haptic Feedback on Perceptions of Softness-Hardness and Stickiness While Touching a Virtual Object",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446403/13bd1eSlytA",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2018/6909/0/690901a086",
"title": "A Possibility of Navigating Users by GVS",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2018/690901a086/13bd1eTtWYb",
"parentPublication": {
"id": "proceedings/nicoint/2018/6909/0",
"title": "2018 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/01/07560603",
"title": "Comparative Evaluation of Tactile Sensation by Electrical and Mechanical Stimulation",
"doi": null,
"abstractUrl": "/journal/th/2017/01/07560603/13rRUyp7tX5",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714040",
"title": "Omnidirectional Galvanic Vestibular Stimulation in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714040/1B0Y04eka8E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscer/2022/8478/0/847800a026",
"title": "Multichannel asynchronous electrical stimulation device relieves muscle fatigue caused by stimulation therapy",
"doi": null,
"abstractUrl": "/proceedings-article/iscer/2022/847800a026/1HbbCwGuMHC",
"parentPublication": {
"id": "proceedings/iscer/2022/8478/0",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a725",
"title": "Spicy-Sensation of Wasabi Enhancement Method Using Anodal Electric Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a725/1J7Wk4tPX1u",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a250",
"title": "Features of brainwave induced by 3D auditory stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a250/1p1gt4ZgYVO",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a498",
"title": "Redirected Walking using Noisy Galvanic Vestibular Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a498/1yeCU92Xt5K",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyuPL0g",
"title": "Proceedings of Virtual Reality",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzCWG3u",
"doi": "10.1109/VR.1999.756956",
"title": "Vestibular Cues and Virtual Environments: Choosing the Magnitude of the Vestibular Cue",
"normalizedTitle": "Vestibular Cues and Virtual Environments: Choosing the Magnitude of the Vestibular Cue",
"abstract": "The design of virtual environments usually concentrates on constructing a realistic visual simulation and ignores the non-visual cues normally associated with moving through an environment.The lack of the normal complement of cues may contribute to cybersickness and may affect operator performance. In VRAIS'98 we described the effect of adding vestibular cues during passive linear motion and showed an unexpected dominance of the vestibular cue in determining the magnitude of the perceived motion. Here we vary the relative magnitude of the visual and vestibular cues and describe a simple linear summation model that predicts the resulting perceived magnitude of motion. The model suggests that designers of virtual reality displays should add vestibular information in a ratio of one to four with the visual motion to obtain convincing and accurate performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The design of virtual environments usually concentrates on constructing a realistic visual simulation and ignores the non-visual cues normally associated with moving through an environment.The lack of the normal complement of cues may contribute to cybersickness and may affect operator performance. In VRAIS'98 we described the effect of adding vestibular cues during passive linear motion and showed an unexpected dominance of the vestibular cue in determining the magnitude of the perceived motion. Here we vary the relative magnitude of the visual and vestibular cues and describe a simple linear summation model that predicts the resulting perceived magnitude of motion. The model suggests that designers of virtual reality displays should add vestibular information in a ratio of one to four with the visual motion to obtain convincing and accurate performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The design of virtual environments usually concentrates on constructing a realistic visual simulation and ignores the non-visual cues normally associated with moving through an environment.The lack of the normal complement of cues may contribute to cybersickness and may affect operator performance. In VRAIS'98 we described the effect of adding vestibular cues during passive linear motion and showed an unexpected dominance of the vestibular cue in determining the magnitude of the perceived motion. Here we vary the relative magnitude of the visual and vestibular cues and describe a simple linear summation model that predicts the resulting perceived magnitude of motion. The model suggests that designers of virtual reality displays should add vestibular information in a ratio of one to four with the visual motion to obtain convincing and accurate performance.",
"fno": "00930229",
"keywords": [],
"authors": [
{
"affiliation": "York University",
"fullName": "Laurence Harris",
"givenName": "Laurence",
"surname": "Harris",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University",
"fullName": "Michael Jenkin",
"givenName": "Michael",
"surname": "Jenkin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University",
"fullName": "Daniel C. Zikovitz",
"givenName": "Daniel C.",
"surname": "Zikovitz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-03-01T00:00:00",
"pubType": "proceedings",
"pages": "229",
"year": "1999",
"issn": "1087-8270",
"isbn": "0-7695-0093-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00930222",
"articleId": "12OmNz5s0Mi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00930237",
"articleId": "12OmNvFHfIa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIx7SE9LiM",
"doi": "10.1109/VR46266.2020.00090",
"title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"normalizedTitle": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"abstract": "The commercialization and lowering costs of consumer grade Virtual Reality (VR) devices has made the technology increasingly accessible to users around the world. The usage of VR technology is often accompanied by an undesirable side effect called cybersickness. Cyber-sickness is the feeling of discomfort that occurs during VR experiences, producing symptoms similar to those of motion sickness. It continues to remain one of the biggest hurdles to the widespread adoption of VR, making it increasingly important to explore and understand the factors that influence its onset. In this work, we investigated the influence of the presence/absence of motion control on the onset and severity of cybersickness in an HMD based VR driving simulation employing steering as a travel metaphor. Towards this end, we conducted a between subjects study manipulating the presence of control between three experimental conditions, two of which (Driving condition and Yoked Pair condition) formed a yoked control design where every pair of drivers and their yoked pairs were exposed to identical vehicular motion stimuli created by participants in the driving condition. In the other condition (Autonomous Car condition), participants experienced a program driven autonomous vehicle simulation. Results indicated that participants in the Driving condition experienced higher levels of cybersickness than participants in the Yoked Pair condition. While these results don’t conform to findings from previous research which suggests that having control over motion reduces cybersickness, it seems to point towards the importance of the fidelity of the control metaphor’s feedback response in alleviating cybersickness. Simply allowing one control their motion may not readily alleviate cybersickness but could instead increase it in such HMD based VR driving simulations. 
It may hence be important to consider how well the control metaphor and its feedback matches users’ expectations if we want to successfully mitigate cybersickness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The commercialization and lowering costs of consumer grade Virtual Reality (VR) devices has made the technology increasingly accessible to users around the world. The usage of VR technology is often accompanied by an undesirable side effect called cybersickness. Cyber-sickness is the feeling of discomfort that occurs during VR experiences, producing symptoms similar to those of motion sickness. It continues to remain one of the biggest hurdles to the widespread adoption of VR, making it increasingly important to explore and understand the factors that influence its onset. In this work, we investigated the influence of the presence/absence of motion control on the onset and severity of cybersickness in an HMD based VR driving simulation employing steering as a travel metaphor. Towards this end, we conducted a between subjects study manipulating the presence of control between three experimental conditions, two of which (Driving condition and Yoked Pair condition) formed a yoked control design where every pair of drivers and their yoked pairs were exposed to identical vehicular motion stimuli created by participants in the driving condition. In the other condition (Autonomous Car condition), participants experienced a program driven autonomous vehicle simulation. Results indicated that participants in the Driving condition experienced higher levels of cybersickness than participants in the Yoked Pair condition. While these results don’t conform to findings from previous research which suggests that having control over motion reduces cybersickness, it seems to point towards the importance of the fidelity of the control metaphor’s feedback response in alleviating cybersickness. Simply allowing one control their motion may not readily alleviate cybersickness but could instead increase it in such HMD based VR driving simulations. 
It may hence be important to consider how well the control metaphor and its feedback matches users’ expectations if we want to successfully mitigate cybersickness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The commercialization and lowering costs of consumer grade Virtual Reality (VR) devices has made the technology increasingly accessible to users around the world. The usage of VR technology is often accompanied by an undesirable side effect called cybersickness. Cyber-sickness is the feeling of discomfort that occurs during VR experiences, producing symptoms similar to those of motion sickness. It continues to remain one of the biggest hurdles to the widespread adoption of VR, making it increasingly important to explore and understand the factors that influence its onset. In this work, we investigated the influence of the presence/absence of motion control on the onset and severity of cybersickness in an HMD based VR driving simulation employing steering as a travel metaphor. Towards this end, we conducted a between subjects study manipulating the presence of control between three experimental conditions, two of which (Driving condition and Yoked Pair condition) formed a yoked control design where every pair of drivers and their yoked pairs were exposed to identical vehicular motion stimuli created by participants in the driving condition. In the other condition (Autonomous Car condition), participants experienced a program driven autonomous vehicle simulation. Results indicated that participants in the Driving condition experienced higher levels of cybersickness than participants in the Yoked Pair condition. While these results don’t conform to findings from previous research which suggests that having control over motion reduces cybersickness, it seems to point towards the importance of the fidelity of the control metaphor’s feedback response in alleviating cybersickness. Simply allowing one control their motion may not readily alleviate cybersickness but could instead increase it in such HMD based VR driving simulations. 
It may hence be important to consider how well the control metaphor and its feedback matches users’ expectations if we want to successfully mitigate cybersickness.",
"fno": "09089513",
"keywords": [
"Helmet Mounted Displays",
"Motion Control",
"Virtual Reality",
"Motion Control",
"Immersive Virtual Environments",
"VR Technology",
"VR Experiences",
"Motion Sickness",
"Yoked Control Design",
"Autonomous Car",
"VR Driving Simulations",
"Cybersickness Alleviation",
"Vehicular Motion Stimuli",
"Yoked Pair Condition",
"Virtual Reality Devices",
"Virtual Environments",
"Virtual Reality",
"Feedback",
"Motion Control",
"Simulation",
"Cybersickness",
"Human Centered Computing",
"Empirical Studies In HCI",
"Human Centered Computing",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Clemson University",
"fullName": "Roshan Venkatakrishnan",
"givenName": "Roshan",
"surname": "Venkatakrishnan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Rohith Venkatakrishnan",
"givenName": "Rohith",
"surname": "Venkatakrishnan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Key Lime Interactive",
"fullName": "Ayush Bhargava",
"givenName": "Ayush",
"surname": "Bhargava",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Kathryn Lucaites",
"givenName": "Kathryn",
"surname": "Lucaites",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Hannah Solini",
"givenName": "Hannah",
"surname": "Solini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Matias Volonte",
"givenName": "Matias",
"surname": "Volonte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Andrew Robb",
"givenName": "Andrew",
"surname": "Robb",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NCTU",
"fullName": "Sabarish V Babu",
"givenName": "Sabarish V",
"surname": "Babu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NCTU",
"fullName": "Wen-Chieh Lin",
"givenName": "Wen-Chieh",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Yun-Xuan Lin",
"givenName": "Yun-Xuan",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "672-681",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089634",
"articleId": "1jIxcsLcYes",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089551",
"articleId": "1jIx95ncylO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a103",
"title": "Asymmetric Lateral Field-of-View Restriction to Mitigate Cybersickness During Virtual Turns",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a103/1CJbKPRegGk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a428",
"title": "You’re in for a Bumpy Ride! Uneven Terrain Increases Cybersickness While Navigating with Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a428/1CJbKYSq2Vq",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a528",
"title": "Human Factors Related to Cybersickness Tolerance in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a528/1CJcDQEpCqA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a307",
"title": "Demographic and Behavioral Correlates of Cybersickness: A Large Lab-in-the-Field Study of 837 Participants",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a307/1JrRjge0g6I",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049731",
"title": "Cybersickness, Cognition, & Motor Skills: The Effects of Music, Gender, and Gaming Experience",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049731/1KYow8CUV20",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797728",
"title": "Towards an Immersive Driving Simulator to Study Factors Related to Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797728/1cJ110fSqvm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089551",
"title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089551/1jIx95ncylO",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090510",
"title": "Towards an Immersive Virtual Simulation for Studying Cybersickness during Spatial Knowledge Acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090510/1jIxpN6Ecta",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a123",
"title": "Exploring the feasibility of mitigating VR-HMD-induced cybersickness using cathodal transcranial direct current stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a123/1qpzDMNZnKo",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIx95ncylO",
"doi": "10.1109/VR46266.2020.00091",
"title": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"normalizedTitle": "A Structural Equation Modeling Approach to Understand the Relationship between Control, Cybersickness and Presence in Virtual Reality",
"abstract": "The commercialization of Virtual Reality (VR) devices is making the technology increasingly accessible to users around the world. Despite the success that VR is starting to see with its growing popularity, it has yet to become widely adopted and achieve its ultimate goal- convincingly simulate real life like experiences. The inability to generate adequate levels of presence and to prevent the manifestation of cybersickness are the two prominent barriers that have hindered VR from achieving its ultimate goal. While traditional research has examined factors that influence (correlate with) the onset and severity cybersickness, there is still a gap in our knowledge about the consequences of having motion control on cybersickness in immersive virtual environments (IVE’s) achieved using tracked Head Mounted Displays (HMD’s). Furthermore, outside of a correlational capacity, it is still unclear as to what causes cybersickness to affect presence in immersive virtual environments. The success of immersive virtual reality as a technology will hence largely come down to our ability to understand the interrelationship between these variables and then address the challenges they pose. Towards this end, we investigated how the affordance of motion control affects cybersickness and presence in an HMD based VR driving simulation by conducting a between subjects study where we manipulated the affordance of control between three experimental conditions. We leverage structural equation modeling in an attempt to build a framework that explains the relationship between virtual motion control, workload, cybersickness, time spent in the simulation, perceived time and presence. Our structural model helps explain why motion control could be an important factor to consider in addressing VR’s challenges and realizing its ultimate aim to simulate reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The commercialization of Virtual Reality (VR) devices is making the technology increasingly accessible to users around the world. Despite the success that VR is starting to see with its growing popularity, it has yet to become widely adopted and achieve its ultimate goal- convincingly simulate real life like experiences. The inability to generate adequate levels of presence and to prevent the manifestation of cybersickness are the two prominent barriers that have hindered VR from achieving its ultimate goal. While traditional research has examined factors that influence (correlate with) the onset and severity cybersickness, there is still a gap in our knowledge about the consequences of having motion control on cybersickness in immersive virtual environments (IVE’s) achieved using tracked Head Mounted Displays (HMD’s). Furthermore, outside of a correlational capacity, it is still unclear as to what causes cybersickness to affect presence in immersive virtual environments. The success of immersive virtual reality as a technology will hence largely come down to our ability to understand the interrelationship between these variables and then address the challenges they pose. Towards this end, we investigated how the affordance of motion control affects cybersickness and presence in an HMD based VR driving simulation by conducting a between subjects study where we manipulated the affordance of control between three experimental conditions. We leverage structural equation modeling in an attempt to build a framework that explains the relationship between virtual motion control, workload, cybersickness, time spent in the simulation, perceived time and presence. Our structural model helps explain why motion control could be an important factor to consider in addressing VR’s challenges and realizing its ultimate aim to simulate reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The commercialization of Virtual Reality (VR) devices is making the technology increasingly accessible to users around the world. Despite the success that VR is starting to see with its growing popularity, it has yet to become widely adopted and achieve its ultimate goal- convincingly simulate real life like experiences. The inability to generate adequate levels of presence and to prevent the manifestation of cybersickness are the two prominent barriers that have hindered VR from achieving its ultimate goal. While traditional research has examined factors that influence (correlate with) the onset and severity cybersickness, there is still a gap in our knowledge about the consequences of having motion control on cybersickness in immersive virtual environments (IVE’s) achieved using tracked Head Mounted Displays (HMD’s). Furthermore, outside of a correlational capacity, it is still unclear as to what causes cybersickness to affect presence in immersive virtual environments. The success of immersive virtual reality as a technology will hence largely come down to our ability to understand the interrelationship between these variables and then address the challenges they pose. Towards this end, we investigated how the affordance of motion control affects cybersickness and presence in an HMD based VR driving simulation by conducting a between subjects study where we manipulated the affordance of control between three experimental conditions. We leverage structural equation modeling in an attempt to build a framework that explains the relationship between virtual motion control, workload, cybersickness, time spent in the simulation, perceived time and presence. Our structural model helps explain why motion control could be an important factor to consider in addressing VR’s challenges and realizing its ultimate aim to simulate reality.",
"fno": "09089551",
"keywords": [
"Control Engineering Computing",
"Ergonomics",
"Helmet Mounted Displays",
"Human Factors",
"Motion Control",
"Statistical Analysis",
"Virtual Reality",
"Severity Cybersickness",
"Immersive Virtual Reality",
"Virtual Motion Control",
"Structural Equation Modeling Approach",
"Tracked Head Mounted Displays",
"HMD Based VR Driving Simulation",
"Virtual Environments",
"Cybersickness",
"Virtual Reality",
"Motion Control",
"Human Computer Interaction",
"Human Centered Computing",
"Empirical Studies In HCI",
"Human Centered Computing",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Clemson University",
"fullName": "Rohith Venkatakrishnan",
"givenName": "Rohith",
"surname": "Venkatakrishnan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Roshan Venkatakrishnan",
"givenName": "Roshan",
"surname": "Venkatakrishnan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Reza Ghaiumy Anaraky",
"givenName": "Reza Ghaiumy",
"surname": "Anaraky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Matias Volonte",
"givenName": "Matias",
"surname": "Volonte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Bart Knijnenburg",
"givenName": "Bart",
"surname": "Knijnenburg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Sabarish V Babu",
"givenName": "Sabarish V",
"surname": "Babu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "682-691",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089513",
"articleId": "1jIx7SE9LiM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089533",
"articleId": "1jIx7JtSOTC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a103",
"title": "Asymmetric Lateral Field-of-View Restriction to Mitigate Cybersickness During Virtual Turns",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a103/1CJbKPRegGk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a428",
"title": "You’re in for a Bumpy Ride! Uneven Terrain Increases Cybersickness While Navigating with Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a428/1CJbKYSq2Vq",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a528",
"title": "Human Factors Related to Cybersickness Tolerance in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a528/1CJcDQEpCqA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a307",
"title": "Demographic and Behavioral Correlates of Cybersickness: A Large Lab-in-the-Field Study of 837 Participants",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a307/1JrRjge0g6I",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797728",
"title": "Towards an Immersive Driving Simulator to Study Factors Related to Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797728/1cJ110fSqvm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089513",
"title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090623",
"title": "Using Screen Capture Video to Understand Learning in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090623/1jIxukBU3g4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2022/06/09580681",
"title": "Modeling and Defense of Social Virtual Reality Attacks Inducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tq/2022/06/09580681/1xPo5KfQN1K",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a138",
"title": "Using Trajectory Compression Rate to Predict Changes in Cybersickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a138/1yeD4ffM0c8",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qpzz6dhLLq",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qpzzTXUIgw",
"doi": "10.1109/AIVR50618.2020.00072",
"title": "A Review of Deep Learning Approaches to EEG-Based Classification of Cybersickness in Virtual Reality",
"normalizedTitle": "A Review of Deep Learning Approaches to EEG-Based Classification of Cybersickness in Virtual Reality",
"abstract": "Cybersickness is an unpleasant side effect of exposure to a virtual reality (VR) experience and refers to such physiological repercussions as nausea and dizziness triggered in response to VR exposure. Given the debilitating effect of cybersickness on the user experience in VR, academic interest in the automatic detection of cybersickness from physiological measurements has crested in recent years. Electroencephalography (EEG) has been extensively used to capture changes in electrical activity in the brain and to automatically classify cybersickness from brainwaves using a variety of machine learning algorithms. Recent advances in deep learning (DL) algorithms and increasing availability of computational resources for DL have paved the way for a new area of research into the application of DL frameworks to EEGbased detection of cybersickness. Accordingly, this review involved a systematic review of the peer-reviewed papers concerned with the application of DL frameworks to the classification of cybersickness from EEG signals. The relevant literature was identified through exhaustive database searches, and the papers were scrutinized with respect to experimental protocols for data collection, data preprocessing, and DL architectures. The review revealed a limited number of studies in this nascent area of research and showed that the DL frameworks reported in these studies (i.e., DNN, CNN, and RNN) could classify cybersickness with an average accuracy rate of 93%. This review provides a summary of the trends and issues in the application of DL frameworks to the EEG-based detection of cybersickness, with some guidelines for future research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cybersickness is an unpleasant side effect of exposure to a virtual reality (VR) experience and refers to such physiological repercussions as nausea and dizziness triggered in response to VR exposure. Given the debilitating effect of cybersickness on the user experience in VR, academic interest in the automatic detection of cybersickness from physiological measurements has crested in recent years. Electroencephalography (EEG) has been extensively used to capture changes in electrical activity in the brain and to automatically classify cybersickness from brainwaves using a variety of machine learning algorithms. Recent advances in deep learning (DL) algorithms and increasing availability of computational resources for DL have paved the way for a new area of research into the application of DL frameworks to EEGbased detection of cybersickness. Accordingly, this review involved a systematic review of the peer-reviewed papers concerned with the application of DL frameworks to the classification of cybersickness from EEG signals. The relevant literature was identified through exhaustive database searches, and the papers were scrutinized with respect to experimental protocols for data collection, data preprocessing, and DL architectures. The review revealed a limited number of studies in this nascent area of research and showed that the DL frameworks reported in these studies (i.e., DNN, CNN, and RNN) could classify cybersickness with an average accuracy rate of 93%. This review provides a summary of the trends and issues in the application of DL frameworks to the EEG-based detection of cybersickness, with some guidelines for future research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cybersickness is an unpleasant side effect of exposure to a virtual reality (VR) experience and refers to such physiological repercussions as nausea and dizziness triggered in response to VR exposure. Given the debilitating effect of cybersickness on the user experience in VR, academic interest in the automatic detection of cybersickness from physiological measurements has crested in recent years. Electroencephalography (EEG) has been extensively used to capture changes in electrical activity in the brain and to automatically classify cybersickness from brainwaves using a variety of machine learning algorithms. Recent advances in deep learning (DL) algorithms and increasing availability of computational resources for DL have paved the way for a new area of research into the application of DL frameworks to EEGbased detection of cybersickness. Accordingly, this review involved a systematic review of the peer-reviewed papers concerned with the application of DL frameworks to the classification of cybersickness from EEG signals. The relevant literature was identified through exhaustive database searches, and the papers were scrutinized with respect to experimental protocols for data collection, data preprocessing, and DL architectures. The review revealed a limited number of studies in this nascent area of research and showed that the DL frameworks reported in these studies (i.e., DNN, CNN, and RNN) could classify cybersickness with an average accuracy rate of 93%. This review provides a summary of the trends and issues in the application of DL frameworks to the EEG-based detection of cybersickness, with some guidelines for future research.",
"fno": "746300a351",
"keywords": [
"Convolutional Neural Nets",
"Deep Learning Artificial Intelligence",
"Electroencephalography",
"Medical Computing",
"Medical Signal Processing",
"Reviews",
"Virtual Reality",
"RNN",
"CNN",
"DNN",
"Electroencephalography",
"Dizziness",
"Nausea",
"Physiological Repercussions",
"EEG Based Classification",
"VR",
"Virtual Reality Experience",
"Cybersickness",
"Deep Learning Approaches",
"Cybersickness",
"Electroencephalography",
"Headphones",
"Videos",
"Visualization",
"Deep Learning",
"Wireless Communication",
"Cybersickness",
"Deep Learning",
"EEG",
"EEG Based",
"Brainwaves",
"Neural Networks"
],
"authors": [
{
"affiliation": "Northeastern University,Khoury College of Computer Sciences,Boston,MA",
"fullName": "Caglar Yildirim",
"givenName": "Caglar",
"surname": "Yildirim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "351-357",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7463-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "746300a345",
"articleId": "1qpzDaHLzhu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "746300a358",
"articleId": "1qpzAYILRRe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446194",
"title": "Cybersickness-Provoking Virtual Reality Alters Brain Signals of Persons with Multiple Sclerosis",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446194/13bd1gzWkQm",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2018/7516/3/08549968",
"title": "Identifying Severity Level of Cybersickness from EEG signals using CN2 Rule Induction Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2018/08549968/17D45VsBU0I",
"parentPublication": {
"id": "proceedings/iciibms/2018/7516/3",
"title": "2018 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a777",
"title": "TruVR: Trustworthy Cybersickness Detection using Explainable Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a777/1JrR1CsIUjC",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a609",
"title": "LiteVR: Interpretable and Lightweight Cybersickness Detection using Explainable AI",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a609/1MNgzF7scM0",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797858",
"title": "Immersive EEG: Evaluating Electroencephalography in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797858/1cJ0JWkSE3m",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798334",
"title": "Cybersickness Analysis with EEG Using Deep Learning Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798334/1cJ1btj7kFG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0579",
"title": "A Deep Cybersickness Predictor Based on Brain Signal Analysis for Virtual Reality Contents",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0579/1hQqiL3Qevm",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a538",
"title": "Unsupervised EEG Cybersickness Prediction with Deep Embedded Self Organizing Map",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a538/1pBMvwX0M3m",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a156",
"title": "A new device to restore sensory congruency in virtual reality and to prevent cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a156/1tnWwDLMCAw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a138",
"title": "Using Trajectory Compression Rate to Predict Changes in Cybersickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a138/1yeD4ffM0c8",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXnofrJRu",
"doi": "10.1109/VRW52623.2021.00125",
"title": "Visual Techniques to Reduce Cybersickness in Virtual Reality",
"normalizedTitle": "Visual Techniques to Reduce Cybersickness in Virtual Reality",
"abstract": "Cybersickness is a unpleasant phenomenon caused by the visually induced impression of ego-motion while in fact being seated. To reduce its negative impact in VR experiences, we analyze the effectiveness of two techniques - peripheral blurring and field of view reduction - through an experiment in an interactive race game environment displayed with a commercial head-mounted display with integrated eye tracker. To measure the level of discomfort experienced by our participants, we utilize self-report and physiological measurements. Our results indicate that, among both techniques, reducing the displayed field of view up to 10 degrees is most efficient to mitigate cybersickness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cybersickness is a unpleasant phenomenon caused by the visually induced impression of ego-motion while in fact being seated. To reduce its negative impact in VR experiences, we analyze the effectiveness of two techniques - peripheral blurring and field of view reduction - through an experiment in an interactive race game environment displayed with a commercial head-mounted display with integrated eye tracker. To measure the level of discomfort experienced by our participants, we utilize self-report and physiological measurements. Our results indicate that, among both techniques, reducing the displayed field of view up to 10 degrees is most efficient to mitigate cybersickness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cybersickness is a unpleasant phenomenon caused by the visually induced impression of ego-motion while in fact being seated. To reduce its negative impact in VR experiences, we analyze the effectiveness of two techniques - peripheral blurring and field of view reduction - through an experiment in an interactive race game environment displayed with a commercial head-mounted display with integrated eye tracker. To measure the level of discomfort experienced by our participants, we utilize self-report and physiological measurements. Our results indicate that, among both techniques, reducing the displayed field of view up to 10 degrees is most efficient to mitigate cybersickness.",
"fno": "405700a486",
"keywords": [
"Computer Games",
"Eye",
"Helmet Mounted Displays",
"Virtual Reality",
"Visual Techniques",
"Reduce Cybersickness",
"Virtual Reality",
"Unpleasant Phenomenon",
"Visually Induced Impression",
"Ego Motion",
"VR Experiences",
"Peripheral Blurring Field",
"View Reduction",
"Interactive Race Game Environment",
"Commercial Head Mounted Display",
"Integrated Eye Tracker",
"Physiological Measurements",
"Displayed Field",
"View Up To 10 Degrees",
"Legged Locomotion",
"Visualization",
"Three Dimensional Displays",
"Head Mounted Displays",
"Cybersickness",
"Atmospheric Measurements",
"Conferences",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Empirical Studies In HCI",
"Applied Computing",
"Consumer Health"
],
"authors": [
{
"affiliation": "Institut für Computergraphik, TU Braunschweig",
"fullName": "Colin Groth",
"givenName": "Colin",
"surname": "Groth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institut für Computergraphik, TU Braunschweig",
"fullName": "Jan-Philipp Tauscher",
"givenName": "Jan-Philipp",
"surname": "Tauscher",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institut für Computergraphik, TU Braunschweig",
"fullName": "Nikkel Heesen",
"givenName": "Nikkel",
"surname": "Heesen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institut für Computergraphik, TU Braunschweig",
"fullName": "Susana Castillo",
"givenName": "Susana",
"surname": "Castillo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institut für Computergraphik, TU Braunschweig",
"fullName": "Marcus Magnor",
"givenName": "Marcus",
"surname": "Magnor",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "486-487",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a484",
"articleId": "1tnXe7x7IGI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a488",
"articleId": "1tnWM8KLu0w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2009/3789/0/3789a486",
"title": "Estimating Cybersickness of Simulated Motion Using the Simulator Sickness Questionnaire (SSQ): A Controlled Study",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a486/12OmNAOKnYL",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131742",
"title": "Methods to reduce cybersickness and enhance presence for in-place navigation techniques",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131742/12OmNyxFKaM",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714040",
"title": "Omnidirectional Galvanic Vestibular Stimulation in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714040/1B0Y04eka8E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a103",
"title": "Asymmetric Lateral Field-of-View Restriction to Mitigate Cybersickness During Virtual Turns",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a103/1CJbKPRegGk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a528",
"title": "Human Factors Related to Cybersickness Tolerance in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a528/1CJcDQEpCqA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a533",
"title": "Getting the Most out of Virtual Reality: Evaluating Short Breaks to Reduce Cybersickness and Cognitive Aftereffects",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a533/1CJfa6K7KXm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a351",
"title": "A Review of Deep Learning Approaches to EEG-Based Classification of Cybersickness in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a351/1qpzzTXUIgw",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a148",
"title": "CyberSense: A Closed-Loop Framework to Detect Cybersickness Severity and Adaptively apply Reduction Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a148/1tnWZDrIad2",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a156",
"title": "A new device to restore sensory congruency in virtual reality and to prevent cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a156/1tnWwDLMCAw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeD4ffM0c8",
"doi": "10.1109/ISMAR52148.2021.00028",
"title": "Using Trajectory Compression Rate to Predict Changes in Cybersickness in Virtual Reality Games",
"normalizedTitle": "Using Trajectory Compression Rate to Predict Changes in Cybersickness in Virtual Reality Games",
"abstract": "Identifying cybersickness in virtual reality (VR) applications such as games in a fast, precise, non-intrusive, and non-disruptive way remains challenging. Several factors can cause cybersickness, and their identification will help find its origins and prevent or minimize it. One such factor is virtual movement. Movement, whether physical or virtual, can be represented in different forms. One way to represent and store it is with a temporally annotated point sequence. Because a sequence is memory-consuming, it is often preferable to save it in a compressed form. Compression allows redundant data to be eliminated while still preserving changes in speed and direction. Since changes in direction and velocity in VR can be associated with cybersickness, changes in compression rate can likely indicate changes in cybersickness levels. In this research, we explore whether quantifying changes in virtual movement can be used to estimate variation in cybersickness levels of VR users. We investigate the correlation between changes in the compression rate of movement data in two VR games with changes in players’ cybersickness levels captured during gameplay. Our results show (1) a clear correlation between changes in compression rate and cybersickness, and (2) that a machine learning approach can be used to identify these changes. Finally, results from a second experiment show that our approach is feasible for cybersickness inference in games and other VR applications that involve movement.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Identifying cybersickness in virtual reality (VR) applications such as games in a fast, precise, non-intrusive, and non-disruptive way remains challenging. Several factors can cause cybersickness, and their identification will help find its origins and prevent or minimize it. One such factor is virtual movement. Movement, whether physical or virtual, can be represented in different forms. One way to represent and store it is with a temporally annotated point sequence. Because a sequence is memory-consuming, it is often preferable to save it in a compressed form. Compression allows redundant data to be eliminated while still preserving changes in speed and direction. Since changes in direction and velocity in VR can be associated with cybersickness, changes in compression rate can likely indicate changes in cybersickness levels. In this research, we explore whether quantifying changes in virtual movement can be used to estimate variation in cybersickness levels of VR users. We investigate the correlation between changes in the compression rate of movement data in two VR games with changes in players’ cybersickness levels captured during gameplay. Our results show (1) a clear correlation between changes in compression rate and cybersickness, and (2) that a machine learning approach can be used to identify these changes. Finally, results from a second experiment show that our approach is feasible for cybersickness inference in games and other VR applications that involve movement.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Identifying cybersickness in virtual reality (VR) applications such as games in a fast, precise, non-intrusive, and non-disruptive way remains challenging. Several factors can cause cybersickness, and their identification will help find its origins and prevent or minimize it. One such factor is virtual movement. Movement, whether physical or virtual, can be represented in different forms. One way to represent and store it is with a temporally annotated point sequence. Because a sequence is memory-consuming, it is often preferable to save it in a compressed form. Compression allows redundant data to be eliminated while still preserving changes in speed and direction. Since changes in direction and velocity in VR can be associated with cybersickness, changes in compression rate can likely indicate changes in cybersickness levels. In this research, we explore whether quantifying changes in virtual movement can be used to estimate variation in cybersickness levels of VR users. We investigate the correlation between changes in the compression rate of movement data in two VR games with changes in players’ cybersickness levels captured during gameplay. Our results show (1) a clear correlation between changes in compression rate and cybersickness, and (2) that a machine learning approach can be used to identify these changes. Finally, results from a second experiment show that our approach is feasible for cybersickness inference in games and other VR applications that involve movement.",
"fno": "015800a138",
"keywords": [
"Computer Games",
"Ergonomics",
"Learning Artificial Intelligence",
"Virtual Reality",
"Compressed Form",
"Cybersickness Levels",
"Virtual Movement",
"VR Users",
"Movement Data",
"VR Games",
"Players",
"Cybersickness Inference",
"VR Applications",
"Trajectory Compression Rate",
"Predict Changes",
"Virtual Reality Games",
"Identifying Cybersickness",
"Virtual Reality Applications",
"Temporally Annotated Point Sequence",
"Correlation",
"Cybersickness",
"Design Methodology",
"Neural Networks",
"Games",
"Machine Learning",
"Hardware",
"Human Centered Computing",
"Empirical Studies In HCI",
"Virtual Reality Human Centered Computing",
"HCI Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "Xi’an Jiaotong-Liverpool University DMT Lab Birmingham City University,Department of Computing",
"fullName": "Diego Monteiro",
"givenName": "Diego",
"surname": "Monteiro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Department of Computing",
"fullName": "Hai-Ning Liang",
"givenName": "Hai-Ning",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Department of Computing",
"fullName": "Xiaohang Tang",
"givenName": "Xiaohang",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Manitoba,Department of Computer Science",
"fullName": "Pourang Irani",
"givenName": "Pourang",
"surname": "Irani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "138-146",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "015800a128",
"articleId": "1yeCWKEosp2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a147",
"articleId": "1yeCYy4wcZa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a103",
"title": "Asymmetric Lateral Field-of-View Restriction to Mitigate Cybersickness During Virtual Turns",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a103/1CJbKPRegGk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a528",
"title": "Human Factors Related to Cybersickness Tolerance in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a528/1CJcDQEpCqA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a777",
"title": "TruVR: Trustworthy Cybersickness Detection using Explainable Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a777/1JrR1CsIUjC",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a307",
"title": "Demographic and Behavioral Correlates of Cybersickness: A Large Lab-in-the-Field Study of 837 Participants",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a307/1JrRjge0g6I",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049731",
"title": "Cybersickness, Cognition, & Motor Skills: The Effects of Music, Gender, and Gaming Experience",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049731/1KYow8CUV20",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089513",
"title": "Comparative Evaluation of the Effects of Motion Control on Cybersickness in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089513/1jIx7SE9LiM",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a351",
"title": "A Review of Deep Learning Approaches to EEG-Based Classification of Cybersickness in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a351/1qpzzTXUIgw",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a373",
"title": "Using Fuzzy Logic to Involve Individual Differences for Predicting Cybersickness during VR Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a373/1tuAPQPWR2g",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBubORS",
"title": "2015 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCcKQh6",
"doi": "10.1109/CW.2015.14",
"title": "Crowd Simulation by Applying Individual Human Model with Vision",
"normalizedTitle": "Crowd Simulation by Applying Individual Human Model with Vision",
"abstract": "One of the most difficult tasks in the computer graphics field is the simulation of human behavior. Some researchers have already tried to generate human behavior in a virtual space by introducing psychology and/or by considering collision avoidance methods. Many previous works, however, have a control center that knows everything about individual position and going direction and so forth, and informs some of it to each person. In addition, individual and crowd behavior are different and treated separately. This paper proposes a virtual human model who has his/her own eye with which each person can obtain the information necessary for their actions by themselves. In addition, we have tried to simulate crowd behavior by applying the individual human model for many people without introducing particle or flocking systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the most difficult tasks in the computer graphics field is the simulation of human behavior. Some researchers have already tried to generate human behavior in a virtual space by introducing psychology and/or by considering collision avoidance methods. Many previous works, however, have a control center that knows everything about individual position and going direction and so forth, and informs some of it to each person. In addition, individual and crowd behavior are different and treated separately. This paper proposes a virtual human model who has his/her own eye with which each person can obtain the information necessary for their actions by themselves. In addition, we have tried to simulate crowd behavior by applying the individual human model for many people without introducing particle or flocking systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the most difficult tasks in the computer graphics field is the simulation of human behavior. Some researchers have already tried to generate human behavior in a virtual space by introducing psychology and/or by considering collision avoidance methods. Many previous works, however, have a control center that knows everything about individual position and going direction and so forth, and informs some of it to each person. In addition, individual and crowd behavior are different and treated separately. This paper proposes a virtual human model who has his/her own eye with which each person can obtain the information necessary for their actions by themselves. In addition, we have tried to simulate crowd behavior by applying the individual human model for many people without introducing particle or flocking systems.",
"fno": "9403a210",
"keywords": [
"Legged Locomotion",
"Visualization",
"Computational Modeling",
"Aerospace Electronics",
"Solid Modeling",
"Deformable Models",
"Cities And Towns",
"Crowd Simulation",
"Computer Graphics",
"Cyberspace",
"Virtual Human",
"Human Behavior"
],
"authors": [
{
"affiliation": null,
"fullName": "Nobuhiko Mukai",
"givenName": "Nobuhiko",
"surname": "Mukai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kensuke Tanaka",
"givenName": "Kensuke",
"surname": "Tanaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Youngha Chang",
"givenName": "Youngha",
"surname": "Chang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "210-215",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9403-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9403a204",
"articleId": "12OmNAOKnSy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9403a216",
"articleId": "12OmNvvc5NH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2013/5108/0/5108a877",
"title": "Reconstructing Individual Mobility from Smart Card Transactions: A Space Alignment Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2013/5108a877/12OmNAo45MN",
"parentPublication": {
"id": "proceedings/icdm/2013/5108/0",
"title": "2013 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892a156",
"title": "Crowd Models for Emergency Evacuation: A Review Targeting Human-Centered Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892a156/12OmNCdk2LY",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2013/3408/0/5104a425",
"title": "Reality Mining: Digging the Impact of Friendship and Location on Crowd Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2013/5104a425/12OmNs0kyye",
"parentPublication": {
"id": "proceedings/mass/2013/3408/0",
"title": "2013 IEEE 10th International Conference on Mobile Ad-Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2011/0372/0/06122675",
"title": "Handling greeting gesture in simulated crowd",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122675/12OmNzRHOMX",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714119",
"title": "The One-Man-Crowd: Single User Generation of Crowd Motions Using Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714119/1B0XYoSlCKc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a118",
"title": "Crowd Simulation with Feedback Based on Locomotion State",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a118/1I6RQ8VlGNi",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdataservice/2019/0059/0/005900a159",
"title": "Dynamic Human Behavior Pattern Detection and Classification",
"doi": null,
"abstractUrl": "/proceedings-article/bigdataservice/2019/005900a159/1dDLXybBk4M",
"parentPublication": {
"id": "proceedings/bigdataservice/2019/0059/0",
"title": "2019 IEEE Fifth International Conference on Big Data Computing Service and Applications (BigDataService)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089573",
"title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users’ Affective and Non-Verbal Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2020/9325/0/09232468",
"title": "Crowd Flow Collisions Simulation (Student Consortium)",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2020/09232468/1o56CSgXbEY",
"parentPublication": {
"id": "proceedings/bigmm/2020/9325/0",
"title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a209",
"title": "A Virtual Reality Framework for Human-Virtual Crowd Interaction Studies",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a209/1qpzBFKHFpC",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAsTgXc",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwswfZ2",
"doi": "10.1109/ICCVW.2011.6130236",
"title": "Analyzing pedestrian behavior in crowds for automatic detection of congestions",
"normalizedTitle": "Analyzing pedestrian behavior in crowds for automatic detection of congestions",
"abstract": "Congestions in pedestrian traffic typically occur when the number of pedestrians exceeds the capacity of pedestrian facilities. In some cases, the pedestrian density reaches a critical level which may lead to a crowd stampede as happens rather frequently at mass gatherings, in stadiums or at train stations. In the past, research has focused on improving simulations of crowd motion in order to identify potentially dangerous locations and to direct pedestrian streams. Recently, works towards the automatic real-time detection of critical mass behavior based on optical flow computations have been proposed. In this paper, we verify these approaches by analyzing mircoscopic pedestrian behavior in congestions and conducting experiments on synthetic as well as on real datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Congestions in pedestrian traffic typically occur when the number of pedestrians exceeds the capacity of pedestrian facilities. In some cases, the pedestrian density reaches a critical level which may lead to a crowd stampede as happens rather frequently at mass gatherings, in stadiums or at train stations. In the past, research has focused on improving simulations of crowd motion in order to identify potentially dangerous locations and to direct pedestrian streams. Recently, works towards the automatic real-time detection of critical mass behavior based on optical flow computations have been proposed. In this paper, we verify these approaches by analyzing mircoscopic pedestrian behavior in congestions and conducting experiments on synthetic as well as on real datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Congestions in pedestrian traffic typically occur when the number of pedestrians exceeds the capacity of pedestrian facilities. In some cases, the pedestrian density reaches a critical level which may lead to a crowd stampede as happens rather frequently at mass gatherings, in stadiums or at train stations. In the past, research has focused on improving simulations of crowd motion in order to identify potentially dangerous locations and to direct pedestrian streams. Recently, works towards the automatic real-time detection of critical mass behavior based on optical flow computations have been proposed. In this paper, we verify these approaches by analyzing mircoscopic pedestrian behavior in congestions and conducting experiments on synthetic as well as on real datasets.",
"fno": "06130236",
"keywords": [
"Image Sequences",
"Traffic Engineering Computing",
"Automatic Pedestrian Congestion Detection",
"Pedestrian Facilities",
"Train Stations",
"Dangerous Location Identification",
"Automatic Real Time Critical Mass Behavior Detection",
"Optical Flow Computations",
"Mircoscopic Pedestrian Behavior Analysis",
"Legged Locomotion",
"Histograms",
"Humans",
"Oscillators",
"Cameras",
"Computational Modeling",
"Trajectory"
],
"authors": [
{
"affiliation": "Fraunhofer IAIS, 53754 Sankt Augustin, Germany",
"fullName": "Barbara Krausz",
"givenName": "Barbara",
"surname": "Krausz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer IAIS, 53754 Sankt Augustin, Germany",
"fullName": "Christian Bauckhage",
"givenName": "Christian",
"surname": "Bauckhage",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "144-149",
"year": "2011",
"issn": null,
"isbn": "978-1-4673-0063-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06130235",
"articleId": "12OmNC8uRso",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06130237",
"articleId": "12OmNxw5B5k",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2011/0844/0/06027326",
"title": "Automatic detection of dangerous motion behavior in human crowds",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2011/06027326/12OmNBtl1Ep",
"parentPublication": {
"id": "proceedings/avss/2011/0844/0",
"title": "2011 8th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2016/3284/0/08010592",
"title": "Vehicle-pedestrian dynamic interaction through tractography of relative movements and articulated pedestrian pose estimation",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2016/08010592/12OmNx4Q6IA",
"parentPublication": {
"id": "proceedings/aipr/2016/3284/0",
"title": "2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d137",
"title": "Pedestrian Travel Time Estimation in Crowded Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d137/12OmNxFJXMy",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2017/2939/0/08078498",
"title": "An evidential framework for pedestrian detection in high-density crowds",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2017/08078498/12OmNyVes2v",
"parentPublication": {
"id": "proceedings/avss/2017/2939/0",
"title": "2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icie/2010/4080/4/05572704",
"title": "Pedestrian Agent Navigation Approach in Virtual Passenger Transfer Hub",
"doi": null,
"abstractUrl": "/proceedings-article/icie/2010/05572704/12OmNz5JCbj",
"parentPublication": {
"id": "proceedings/icie/2010/4080/3",
"title": "Information Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/05/05989835",
"title": "Vision-Based Analysis of Small Groups in Pedestrian Crowds",
"doi": null,
"abstractUrl": "/journal/tp/2012/05/05989835/13rRUwI5U91",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/07/07439844",
"title": "Torso Crowds",
"doi": null,
"abstractUrl": "/journal/tg/2017/07/07439844/13rRUy2YLYD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f275",
"title": "Encoding Crowd Interaction with Deep Neural Network for Pedestrian Trajectory Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f275/17D45W2WyzF",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089637",
"title": "Eye-Gaze Activity in Crowds: Impact of Virtual Reality and Density",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089637/1jIx9WIWd5C",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/springsim/2020/370/0/09185410",
"title": "Modeling and Simulating Pedestrian Social Group Behavior with Heterogeneous Social Relationships",
"doi": null,
"abstractUrl": "/proceedings-article/springsim/2020/09185410/1mP60TVadqw",
"parentPublication": {
"id": "proceedings/springsim/2020/370/0",
"title": "2020 Spring Simulation Conference (SpringSim)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1LHcU7Hbllu",
"title": "2022 2nd International Signal Processing, Communications and Engineering Management Conference (ISPCEM)",
"acronym": "ispcem",
"groupId": "10070111",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1LHcXll9mqQ",
"doi": "10.1109/ISPCEM57418.2022.00053",
"title": "Research on multi-scenario crowd density monitoring and management system based on Beidou technology",
"normalizedTitle": "Research on multi-scenario crowd density monitoring and management system based on Beidou technology",
"abstract": "In recent years, the state has vigorously promoted the development of tourism and service industries. At the same time, public security incidents have occurred frequently due to excessive crowd density. Therefore, it is necessary to establish a multi-scenario crowd density monitoring and management system. In this regard, this paper proposes a multi-scenario crowd density monitoring and management system based on Beidou technology, so that managers can formulate scientific crowd control measures. The system consists of three parts: intelligent car, server and user. The smart car moves in a certain area according to the specified trajectory or autonomous navigation. At the collection point, the OpenMV module carried on the car uses the Haar operator to realize face detection. The face detection works by using the Haar Cascade feature detector on the image. Haar Cascades runs very fast, and can be better applied to face detection in the case of people walking. It counts the number of detected faces, and the acquired number is transmitted to the motherboard and the short message sent by the GPS Beidou dual-mode positioning module. The text is sent to the server. The server side processes the number of people and location information received from the trolley. In special circumstances, the trolley can be remotely controlled through the WFI module, which is convenient for viewing at the place where the accident occurred. The client performs system settings on the server to obtain the required information. This system is based on Beidou technology, which helps decision makers to manage the flow of people to a great extent.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, the state has vigorously promoted the development of tourism and service industries. At the same time, public security incidents have occurred frequently due to excessive crowd density. Therefore, it is necessary to establish a multi-scenario crowd density monitoring and management system. In this regard, this paper proposes a multi-scenario crowd density monitoring and management system based on Beidou technology, so that managers can formulate scientific crowd control measures. The system consists of three parts: intelligent car, server and user. The smart car moves in a certain area according to the specified trajectory or autonomous navigation. At the collection point, the OpenMV module carried on the car uses the Haar operator to realize face detection. The face detection works by using the Haar Cascade feature detector on the image. Haar Cascades runs very fast, and can be better applied to face detection in the case of people walking. It counts the number of detected faces, and the acquired number is transmitted to the motherboard and the short message sent by the GPS Beidou dual-mode positioning module. The text is sent to the server. The server side processes the number of people and location information received from the trolley. In special circumstances, the trolley can be remotely controlled through the WFI module, which is convenient for viewing at the place where the accident occurred. The client performs system settings on the server to obtain the required information. This system is based on Beidou technology, which helps decision makers to manage the flow of people to a great extent.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, the state has vigorously promoted the development of tourism and service industries. At the same time, public security incidents have occurred frequently due to excessive crowd density. Therefore, it is necessary to establish a multi-scenario crowd density monitoring and management system. In this regard, this paper proposes a multi-scenario crowd density monitoring and management system based on Beidou technology, so that managers can formulate scientific crowd control measures. The system consists of three parts: intelligent car, server and user. The smart car moves in a certain area according to the specified trajectory or autonomous navigation. At the collection point, the OpenMV module carried on the car uses the Haar operator to realize face detection. The face detection works by using the Haar Cascade feature detector on the image. Haar Cascades runs very fast, and can be better applied to face detection in the case of people walking. It counts the number of detected faces, and the acquired number is transmitted to the motherboard and the short message sent by the GPS Beidou dual-mode positioning module. The text is sent to the server. The server side processes the number of people and location information received from the trolley. In special circumstances, the trolley can be remotely controlled through the WFI module, which is convenient for viewing at the place where the accident occurred. The client performs system settings on the server to obtain the required information. This system is based on Beidou technology, which helps decision makers to manage the flow of people to a great extent.",
"fno": "927100a233",
"keywords": [
"Computerised Monitoring",
"Decision Making",
"Face Recognition",
"Global Positioning System",
"Microcontrollers",
"Telecontrol",
"Travel Industry",
"Decision Makers",
"Face Detection",
"GPS Beidou Dual Mode Positioning Module",
"Haar Cascade Feature Detector",
"Multiscenario Crowd Density Monitoring Management System",
"Open MV Module",
"Service Industries",
"Tourism",
"WFI Module",
"Legged Locomotion",
"Signal Processing",
"Trajectory",
"Safety",
"Servers",
"Face Detection",
"Automobiles",
"Beidou Positioning",
"Number Recognition",
"Management System"
],
"authors": [
{
"affiliation": "Civil Aviation University of China,Tianjin,China",
"fullName": "Xiaying Ji",
"givenName": "Xiaying",
"surname": "Ji",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Civil Aviation University of China,Tianjin,China",
"fullName": "Jiameng Xue",
"givenName": "Jiameng",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Civil Aviation University of China,Tianjin,China",
"fullName": "Shang Gao",
"givenName": "Shang",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Civil Aviation University of China,Tianjin,China",
"fullName": "Zhuoyue Zhang",
"givenName": "Zhuoyue",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ispcem",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-11-01T00:00:00",
"pubType": "proceedings",
"pages": "233-237",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9271-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "927100a229",
"articleId": "1LHd0RrkqQg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "927100a238",
"articleId": "1LHcWukHCrS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/grc/2011/0372/0/06122675",
"title": "Handling greeting gesture in simulated crowd",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122675/12OmNzRHOMX",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012156",
"title": "The large-scale crowd density estimation based on sparse spatiotemporal local binary pattern",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012156/12OmNzvQI0B",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f275",
"title": "Encoding Crowd Interaction with Deep Neural Network for Pedestrian Trajectory Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f275/17D45W2WyzF",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714119",
"title": "The One-Man-Crowd: Single User Generation of Crowd Motions Using Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714119/1B0XYoSlCKc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a118",
"title": "Crowd Simulation with Feedback Based on Locomotion State",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a118/1I6RQ8VlGNi",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinc/2022/0969/0/096900a173",
"title": "Research on Intelligent Condition Monitoring of Power System Based on Beidou Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icinc/2022/096900a173/1M670S2IWYg",
"parentPublication": {
"id": "proceedings/icinc/2022/0969/0",
"title": "2022 International Conference on Informatics, Networking and Computing (ICINC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicas/2019/6106/0/610600a482",
"title": "Monitoring and Research on the Displacement and Deformation of Transmission Line Towers Based on Beidou Monitoring Data",
"doi": null,
"abstractUrl": "/proceedings-article/icicas/2019/610600a482/1iHV222YyyY",
"parentPublication": {
"id": "proceedings/icicas/2019/6106/0",
"title": "2019 International Conference on Intelligent Computing, Automation and Systems (ICICAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089637",
"title": "Eye-Gaze Activity in Crowds: Impact of Virtual Reality and Density",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089637/1jIx9WIWd5C",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a209",
"title": "A Virtual Reality Framework for Human-Virtual Crowd Interaction Studies",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a209/1qpzBFKHFpC",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400c392",
"title": "An Adaptive Positioning and Support System for Field Operations based on Beidou",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400c392/1tzyBnPfxGU",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qpzz6dhLLq",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qpzBFKHFpC",
"doi": "10.1109/AIVR50618.2020.00043",
"title": "A Virtual Reality Framework for Human-Virtual Crowd Interaction Studies",
"normalizedTitle": "A Virtual Reality Framework for Human-Virtual Crowd Interaction Studies",
"abstract": "In this paper we developed a generic framework for authoring virtual crowds with minimal effort. Our intention is to providing to the virtual reality community a framework that allows easy to author virtual crowd scenarios that can be used for human-crowd interaction studies. From previous studies we have conducted, we realized the need of such a framework as it facilitates quicker setup and testing as well as standardizes the measurements and the interaction with virtual crowds. The framework includes assets with realistic human models, and configurations for crowd behavior composition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we developed a generic framework for authoring virtual crowds with minimal effort. Our intention is to providing to the virtual reality community a framework that allows easy to author virtual crowd scenarios that can be used for human-crowd interaction studies. From previous studies we have conducted, we realized the need of such a framework as it facilitates quicker setup and testing as well as standardizes the measurements and the interaction with virtual crowds. The framework includes assets with realistic human models, and configurations for crowd behavior composition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we developed a generic framework for authoring virtual crowds with minimal effort. Our intention is to providing to the virtual reality community a framework that allows easy to author virtual crowd scenarios that can be used for human-crowd interaction studies. From previous studies we have conducted, we realized the need of such a framework as it facilitates quicker setup and testing as well as standardizes the measurements and the interaction with virtual crowds. The framework includes assets with realistic human models, and configurations for crowd behavior composition.",
"fno": "746300a209",
"keywords": [
"Authoring Systems",
"Behavioural Sciences Computing",
"Human Computer Interaction",
"Virtual Reality",
"Virtual Reality Framework",
"Human Virtual Crowd Interaction Studies",
"Virtual Reality Community",
"Realistic Human Models",
"Crowd Behavior Composition",
"Virtual Crowd Scenario Authoring",
"Legged Locomotion",
"Solid Modeling",
"Trajectory",
"Particle Measurements",
"Navigation",
"Games",
"Atmospheric Measurements",
"Virtual Crowd",
"Crowd Authoring",
"Virtual Reality",
"Human Crowd Interaction",
"Experimental Studies"
],
"authors": [
{
"affiliation": "Purdue University,Department of Computer Graphics Technology,West Lafayette,Indiana,U.S.A.",
"fullName": "Michael Nelson",
"givenName": "Michael",
"surname": "Nelson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University,Department of Computer Graphics Technology,West Lafayette,Indiana,U.S.A.",
"fullName": "Christos Mousas",
"givenName": "Christos",
"surname": "Mousas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "209-213",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7463-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "746300a205",
"articleId": "1qpzBsavOuc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "746300a214",
"articleId": "1qpzCDPZguk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549382",
"title": "Poster: Do walking motions enhance visually induced self-motion illusions in virtual reality?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549382/12OmNBr4eym",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ngmast/2016/0949/0/07801465",
"title": "Immersive Virtual Reality as a Supplement in the Rehabilitation Program of Post-Stroke Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ngmast/2016/07801465/12OmNrMZpyR",
"parentPublication": {
"id": "proceedings/ngmast/2016/0949/0",
"title": "2016 10th International Conference on Next-Generation Mobile Applications, Security and Technologies (NGMAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000319",
"title": "Crowd simulation in emergency aircraft evacuation using Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000319/12OmNxj23jk",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131756",
"title": "Distance perception during cooperative virtual locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131756/12OmNy49sEA",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2014/6036/0/6036a001",
"title": "Finding Nemo: Finding Your Lost Child in Crowds via Mobile Crowd Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2014/6036a001/12OmNzxgHBj",
"parentPublication": {
"id": "proceedings/mass/2014/6036/0",
"title": "2014 IEEE 11th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446235",
"title": "Influences on the Elicitation of Interpersonal Space with Virtual Humans",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446235/13bd1eW2l9F",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404588",
"title": "Recalibration of Perceived Distance in Virtual Environments Occurs Rapidly and Transfers Asymmetrically Across Scale",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404588/13rRUyuegh9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714119",
"title": "The One-Man-Crowd: Single User Generation of Crowd Motions Using Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714119/1B0XYoSlCKc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089637",
"title": "Eye-Gaze Activity in Crowds: Impact of Virtual Reality and Density",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089637/1jIx9WIWd5C",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089573",
"title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users’ Affective and Non-Verbal Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx2QUJN",
"doi": "10.1109/VR.2015.7223331",
"title": "Mixed reality simulation with physical mobile display devices",
"normalizedTitle": "Mixed reality simulation with physical mobile display devices",
"abstract": "This paper presents the design and implementation of a system for simulating mixed reality in setups combining mobile devices and large backdrop displays. With a mixed reality simulator, one can perform usability studies and evaluate mixed reality systems while minimizing confounding variables. This paper describes how mobile device AR design factors can be flexibly and systematically explored without sacrificing the touch and direct unobstructed manipulation of a physical personal MR display. First, we describe general principles to consider when implementing a mixed reality simulator, enumerating design factors. Then, we present our implementation which utilizes personal mobile display devices in conjunction with a large surround-view display environment. Standing in the center of the display, a user may direct a mobile device, such as a tablet or head-mounted display, to a portion of the scene, which affords them a potentially annotated view of the area of interest. The user may employ gesture or touch screen interaction on a simulated augmented camera feed, as they typically would in video-see-through mixed reality applications. We present calibration and system performance results and illustrate our system's flexibility by presenting the design of three usability evaluation scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents the design and implementation of a system for simulating mixed reality in setups combining mobile devices and large backdrop displays. With a mixed reality simulator, one can perform usability studies and evaluate mixed reality systems while minimizing confounding variables. This paper describes how mobile device AR design factors can be flexibly and systematically explored without sacrificing the touch and direct unobstructed manipulation of a physical personal MR display. First, we describe general principles to consider when implementing a mixed reality simulator, enumerating design factors. Then, we present our implementation which utilizes personal mobile display devices in conjunction with a large surround-view display environment. Standing in the center of the display, a user may direct a mobile device, such as a tablet or head-mounted display, to a portion of the scene, which affords them a potentially annotated view of the area of interest. The user may employ gesture or touch screen interaction on a simulated augmented camera feed, as they typically would in video-see-through mixed reality applications. We present calibration and system performance results and illustrate our system's flexibility by presenting the design of three usability evaluation scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents the design and implementation of a system for simulating mixed reality in setups combining mobile devices and large backdrop displays. With a mixed reality simulator, one can perform usability studies and evaluate mixed reality systems while minimizing confounding variables. This paper describes how mobile device AR design factors can be flexibly and systematically explored without sacrificing the touch and direct unobstructed manipulation of a physical personal MR display. First, we describe general principles to consider when implementing a mixed reality simulator, enumerating design factors. Then, we present our implementation which utilizes personal mobile display devices in conjunction with a large surround-view display environment. Standing in the center of the display, a user may direct a mobile device, such as a tablet or head-mounted display, to a portion of the scene, which affords them a potentially annotated view of the area of interest. The user may employ gesture or touch screen interaction on a simulated augmented camera feed, as they typically would in video-see-through mixed reality applications. We present calibration and system performance results and illustrate our system's flexibility by presenting the design of three usability evaluation scenarios.",
"fno": "07223331",
"keywords": [
"Virtual Reality",
"Cameras",
"Lenses",
"Smart Phones",
"Servers",
"Mobile Communication",
"Interaction Techniques",
"Augmented Reality",
"Virtual Reality",
"Large Displays",
"Immersive Displays",
"Mobile Device",
"Input Device"
],
"authors": [
{
"affiliation": "University of California, Santa Barbara",
"fullName": "Mathieu Rodrigue",
"givenName": "Mathieu",
"surname": "Rodrigue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Santa Barbara",
"fullName": "Andrew Waranis",
"givenName": "Andrew",
"surname": "Waranis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Santa Barbara",
"fullName": "Tim Wood",
"givenName": "Tim",
"surname": "Wood",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Santa Barbara",
"fullName": "Tobias Hollerer",
"givenName": "Tobias",
"surname": "Hollerer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "105-110",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223330",
"articleId": "12OmNCdk2JE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223332",
"articleId": "12OmNBIWXAD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ngmast/2009/3786/0/3786a058",
"title": "Layer-Based Media Integration for Mobile Mixed-Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ngmast/2009/3786a058/12OmNAgoV6Y",
"parentPublication": {
"id": "proceedings/ngmast/2009/3786/0",
"title": "Next Generation Mobile Applications, Services and Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643608",
"title": "Various tangible devices suitable for mixed reality interactions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643608/12OmNBpVPUh",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759615",
"title": "MRStudio: A mixed reality display system for aircraft cockpit",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759615/12OmNC3FGg8",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480797",
"title": "MIRAGE: A Touch Screen based Mixed Reality Interface for Space Planning Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480797/12OmNwFidfP",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444815",
"title": "Egocentric space-distorting visualizations for rapid environment exploration in mobile mixed reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444815/12OmNylsZU8",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550226",
"title": "Poster: Creating a user-specific perspective view for mobile mixed reality systems on smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550226/12OmNzsJ7tT",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010159",
"title": "Visuo-Haptic Mixed Reality with Unobstructed Tool-Hand Integration",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010159/13rRUyeTVi1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714124",
"title": "Video See-Through Mixed Reality with Focus Cues",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714124/1B0XWyWo5KE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798255",
"title": "HapticSphere: Physical Support To Enable Precision Touch Interaction in Mobile Mixed-Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798255/1cJ0Uje3t8Q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a353",
"title": "Perceptual MR Space: Interactive Toolkit for Efficient Environment Reconstruction in Mobile Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a353/1gysi7jaaKQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WfLETGnu",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00195",
"title": "CADET: A Collaborative Agile Data Exploration Tool for Mixed Reality",
"normalizedTitle": "CADET: A Collaborative Agile Data Exploration Tool for Mixed Reality",
"abstract": "The need to understand and communicate the nuances of complex situational information is an ever-present requirement in Command and Control (C2). It is often difficult for remote users of a system to clearly understand what a user is trying to relay. Mixed Reality (MR) technology presents a significant opportunity for exploring and communicating C2 data. In this paper, we present our system, CADET, as a step towards enriching the collaborative C2 user experience by allowing users to remotely and locally perform ad hoc analysis through information displays created by hand interactions and speech in MR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The need to understand and communicate the nuances of complex situational information is an ever-present requirement in Command and Control (C2). It is often difficult for remote users of a system to clearly understand what a user is trying to relay. Mixed Reality (MR) technology presents a significant opportunity for exploring and communicating C2 data. In this paper, we present our system, CADET, as a step towards enriching the collaborative C2 user experience by allowing users to remotely and locally perform ad hoc analysis through information displays created by hand interactions and speech in MR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The need to understand and communicate the nuances of complex situational information is an ever-present requirement in Command and Control (C2). It is often difficult for remote users of a system to clearly understand what a user is trying to relay. Mixed Reality (MR) technology presents a significant opportunity for exploring and communicating C2 data. In this paper, we present our system, CADET, as a step towards enriching the collaborative C2 user experience by allowing users to remotely and locally perform ad hoc analysis through information displays created by hand interactions and speech in MR.",
"fno": "536500a899",
"keywords": [
"Augmented Reality",
"Command And Control Systems",
"Groupware",
"User Experience",
"User Interfaces",
"Adhoc Analysis",
"CADET",
"Collaborative Agile Data Exploration Tool",
"Collaborative C 2 User Experience",
"Command And Control",
"Complex Situational Information",
"Hand Interactions",
"Information Displays",
"Mixed Reality Technology",
"Remote Users",
"Significant Opportunity",
"Command And Control Systems",
"Mixed Reality",
"Data Visualization",
"Collaboration",
"Relays",
"Augmented Reality",
"Human Centered Computing Visualization Visualization Application Domains Information Visualization",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "University of South Australia",
"fullName": "Jeremy McDade",
"givenName": "Jeremy",
"surname": "McDade",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Adam Drogemuller",
"givenName": "Adam",
"surname": "Drogemuller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Allison Jing",
"givenName": "Allison",
"surname": "Jing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Nick Ireland",
"givenName": "Nick",
"surname": "Ireland",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "James Walsh",
"givenName": "James",
"surname": "Walsh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Bruce Thomas",
"givenName": "Bruce",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Wolfgang Mayer",
"givenName": "Wolfgang",
"surname": "Mayer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Andrew Cunningham",
"givenName": "Andrew",
"surname": "Cunningham",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "899-900",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a897",
"articleId": "1J7Wcw58WoU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a901",
"articleId": "1J7Wi3ec0us",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2008/2357/0/04594739",
"title": "Here and there: Experiencing co-presence through mixed reality-mediated collaborative design system",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2008/04594739/12OmNzRHOVV",
"parentPublication": {
"id": "proceedings/cit/2008/2357/0",
"title": "2008 8th IEEE International Conference on Computer and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cste/2022/8188/0/818800a082",
"title": "Integrating Inquiry-Based Pedagogy with Mixed Reality: Theories and Practices",
"doi": null,
"abstractUrl": "/proceedings-article/cste/2022/818800a082/1J7VZM9bxDi",
"parentPublication": {
"id": "proceedings/cste/2022/8188/0",
"title": "2022 4th International Conference on Computer Science and Technologies in Education (CSTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a615",
"title": "A Shared Interactive Space in Mixed Reality for Collaborative Digital Tower Operations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a615/1J7W9HboPmg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a266",
"title": "Toward Methods To Develop Experience Measurements For Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a266/1J7WhrNYPh6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a923",
"title": "Cross Reality Authoring: A Mixed Reality Editor approach",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a923/1J7WtZdBjig",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2019/5434/0/543400a252",
"title": "Mixed Reality, Mamulengos and MamuLEDs",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2019/543400a252/1fHjxASaNgc",
"parentPublication": {
"id": "proceedings/svr/2019/5434/0",
"title": "2019 21st Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a778",
"title": "Evaluating Object Manipulation Interaction Techniques in Mixed Reality: Tangible User Interfaces and Gesture",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a778/1tuBngWRAC4",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a399",
"title": "Detecting and Preventing Faked Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a399/1xPsmX6Ouvm",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a274",
"title": "A Mixed-Reality System to Promote Child Engagement in Remote Intergenerational Storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a274/1yeQMxSyLp6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1KmF7rVz6Y8",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KmFaKoZWhy",
"doi": "10.1109/AIVR56993.2022.00030",
"title": "Active Visualization of Visual Cues on Hand for Better User Interface Design Generalization in Mixed Reality",
"normalizedTitle": "Active Visualization of Visual Cues on Hand for Better User Interface Design Generalization in Mixed Reality",
"abstract": "With the emergence of various unique augmented reality devices, researchers are exploring how mixed-reality applications can enhance user experience. We propose a working prototype emphasizing that mixed reality applications should consider incorporating visual cues on the user’s hand for better user experience, in our case representing selected color on the index fingertip. Such a design can assist users in being attentive regarding what color they are using. Eventually, reducing unintended errors occur when the active visual component is not visible in the field of view. Generally, we argue that representing visual cues on the user’s hand has major advantages, including defining a general platform for placing visual cues, immediate response, and preserving computational resources. Most importantly, developers can utilize the general platform to give or place visual feedback in mixed-reality applications. Moreover, we highlight the importance of interacting in mid-air compared to tactile feedback.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the emergence of various unique augmented reality devices, researchers are exploring how mixed-reality applications can enhance user experience. We propose a working prototype emphasizing that mixed reality applications should consider incorporating visual cues on the user’s hand for better user experience, in our case representing selected color on the index fingertip. Such a design can assist users in being attentive regarding what color they are using. Eventually, reducing unintended errors occur when the active visual component is not visible in the field of view. Generally, we argue that representing visual cues on the user’s hand has major advantages, including defining a general platform for placing visual cues, immediate response, and preserving computational resources. Most importantly, developers can utilize the general platform to give or place visual feedback in mixed-reality applications. Moreover, we highlight the importance of interacting in mid-air compared to tactile feedback.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the emergence of various unique augmented reality devices, researchers are exploring how mixed-reality applications can enhance user experience. We propose a working prototype emphasizing that mixed reality applications should consider incorporating visual cues on the user’s hand for better user experience, in our case representing selected color on the index fingertip. Such a design can assist users in being attentive regarding what color they are using. Eventually, reducing unintended errors occur when the active visual component is not visible in the field of view. Generally, we argue that representing visual cues on the user’s hand has major advantages, including defining a general platform for placing visual cues, immediate response, and preserving computational resources. Most importantly, developers can utilize the general platform to give or place visual feedback in mixed-reality applications. Moreover, we highlight the importance of interacting in mid-air compared to tactile feedback.",
"fno": "572500a149",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Human Computer Interaction",
"User Interfaces",
"Virtual Reality",
"Active Visual Component",
"Active Visualization",
"Better User Interface Design Generalization",
"General Platform",
"Mixed Reality Applications",
"Unique Augmented Reality Devices",
"User Experience",
"Visual Cues",
"Visual Feedback",
"Visualization",
"Mixed Reality",
"Tactile Sensors",
"Prototypes",
"Color",
"User Interfaces",
"User Experience",
"Augmented Reality",
"Mixed Reality",
"Visual Cues",
"Active Visualization",
"User Interface Design",
"Color Selection",
"Hand Interaction"
],
"authors": [
{
"affiliation": "Dalhousie University,Faculty of Computer Science,Halifax,Canada",
"fullName": "Muhammad Raza",
"givenName": "Muhammad",
"surname": "Raza",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dalhousie University,Faculty of Computer Science,Halifax,Canada",
"fullName": "Derek Reilly",
"givenName": "Derek",
"surname": "Reilly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dalhousie University,Faculty of Computer Science,Halifax,Canada",
"fullName": "Joseph Malloch",
"givenName": "Joseph",
"surname": "Malloch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-12-01T00:00:00",
"pubType": "proceedings",
"pages": "149-152",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5725-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "572500a144",
"articleId": "1KmFetCHntS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "572500a153",
"articleId": "1KmFd2A2UH6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2010/6846/0/05444706",
"title": "Evaluating depth perception of photorealistic mixed reality visualizations for occluded objects in outdoor environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444706/12OmNsd6vhN",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010159",
"title": "Visuo-Haptic Mixed Reality with Unobstructed Tool-Hand Integration",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010159/13rRUyeTVi1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714124",
"title": "Video See-Through Mixed Reality with Focus Cues",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714124/1B0XWyWo5KE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09978915",
"title": "Visual Cue Based Corrective Feedback for Motor Skill Training in Mixed Reality: A Survey",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09978915/1IXUnNBj0Yw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a414",
"title": "Evaluating the Object-Centered User Interface in Head-Worn Mixed Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a414/1JrRiVjEd44",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a353",
"title": "Perceptual MR Space: Interactive Toolkit for Efficient Environment Reconstruction in Mobile Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a353/1gysi7jaaKQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a778",
"title": "Evaluating Object Manipulation Interaction Techniques in Mixed Reality: Tangible User Interfaces and Gesture",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a778/1tuBngWRAC4",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a473",
"title": "The Impact of Gaze Cues in Mixed Reality Collaborations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a473/1yeQCejb7Co",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a075",
"title": "Immersive Experience Prototyping: Using Mixed Reality to Integrate Real Devices in Virtual Simulated Contexts to Prototype Experiences with Mobile Apps",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a075/1yfxIU5uhR6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1aXJnUyI",
"doi": "10.1109/VR.2019.8798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"normalizedTitle": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"abstract": "We propose enhancing live 360 panorama-based Mixed Reality (MR) remote collaboration through supporting visual annotation cues. Prior work on live 360 panorama-based collaboration used MR visualization to overlay visual cues, such as view frames and virtual hands, yet they were not registered onto the shared physical workspace, hence had limitations in accuracy for pointing or marking objects. Our prototype system uses spatial mapping and tracking feature of an Augmented Reality head-mounted display to show visual annotation cues accurately registered onto the physical environment. We describe the design and implementation details of our prototype system, and discuss on how such feature could help improve MR remote collaboration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose enhancing live 360 panorama-based Mixed Reality (MR) remote collaboration through supporting visual annotation cues. Prior work on live 360 panorama-based collaboration used MR visualization to overlay visual cues, such as view frames and virtual hands, yet they were not registered onto the shared physical workspace, hence had limitations in accuracy for pointing or marking objects. Our prototype system uses spatial mapping and tracking feature of an Augmented Reality head-mounted display to show visual annotation cues accurately registered onto the physical environment. We describe the design and implementation details of our prototype system, and discuss on how such feature could help improve MR remote collaboration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose enhancing live 360 panorama-based Mixed Reality (MR) remote collaboration through supporting visual annotation cues. Prior work on live 360 panorama-based collaboration used MR visualization to overlay visual cues, such as view frames and virtual hands, yet they were not registered onto the shared physical workspace, hence had limitations in accuracy for pointing or marking objects. Our prototype system uses spatial mapping and tracking feature of an Augmented Reality head-mounted display to show visual annotation cues accurately registered onto the physical environment. We describe the design and implementation details of our prototype system, and discuss on how such feature could help improve MR remote collaboration.",
"fno": "08798128",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Object Tracking",
"Visual Annotation Cues",
"MR Visualization",
"Visual Cues",
"MR Remote Collaboration",
"Augmented Reality Head Mounted Display",
"Live 360 Panorama Based Mixed Reality Remote Collaboration",
"View Frames",
"Virtual Hands",
"Visualization",
"Collaboration",
"Resists",
"Three Dimensional Displays",
"Prototypes",
"Augmented Reality",
"Mixed Reality",
"Remote Collaboration",
"360 Panorama",
"Annotation",
"H 5 3 Information Interfaces And Presentation Group And Organization Interfaces X 2014 Collaborative Computing",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems X 2014 Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "School of ITMS, University of South Australia",
"fullName": "Theophilus Teo",
"givenName": "Theophilus",
"surname": "Teo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of ITMS, University of South Australia",
"fullName": "Gun A Lee",
"givenName": "Gun A",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of ITMS, University of South Australia",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CSIRO",
"fullName": "Matt Adcock",
"givenName": "Matt",
"surname": "Adcock",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1187-1188",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798061",
"articleId": "1cJ0FW8mZpK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798123",
"articleId": "1cJ0VB3SBlS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671795",
"title": "Study of augmented gesture communication cues and view sharing in remote collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671795/12OmNwl8GBu",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948517",
"title": "Collaboration in mediated and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948517/12OmNy6HQPU",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a153",
"title": "A User Study on MR Remote Collaboration Using Live 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a153/17D45VsBU1V",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699227",
"title": "Do You Know What I Mean? An MR-Based Collaborative Platform",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699227/19F1PhUp98k",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a250",
"title": "Using Speech to Visualise Shared Gaze Cues in MR Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a250/1CJcnpSVomk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a022",
"title": "Merging Live and Static 360 Panoramas Inside a 3D Scene for Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a022/1gysn0YPLm8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093582",
"title": "360 Panorama Synthesis from a Sparse Set of Images with Unknown Field of View",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093582/1jPbrcnBX8s",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a532",
"title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360° Video",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a473",
"title": "The Impact of Gaze Cues in Mixed Reality Collaborations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a473/1yeQCejb7Co",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qpzz6dhLLq",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qpzAneRTX2",
"doi": "10.1109/AIVR50618.2020.00017",
"title": "Mirrorlabs - creating accessible Digital Twins of robotic production environment with Mixed Reality",
"normalizedTitle": "Mirrorlabs - creating accessible Digital Twins of robotic production environment with Mixed Reality",
"abstract": "How to visualize recorded production data in Virtual Reality? How to use state of the art Augmented Reality displays that can show robot data? This paper introduces an opensource ICT framework approach for combining Unity-based Mixed Reality applications with robotic production equipment using ROS Industrial. This publication gives details on the implementation and demonstrates the use as a data analysis tool in the context of scientific exchange within the area of Mixed Reality enabled human-robot co-production.",
"abstracts": [
{
"abstractType": "Regular",
"content": "How to visualize recorded production data in Virtual Reality? How to use state of the art Augmented Reality displays that can show robot data? This paper introduces an opensource ICT framework approach for combining Unity-based Mixed Reality applications with robotic production equipment using ROS Industrial. This publication gives details on the implementation and demonstrates the use as a data analysis tool in the context of scientific exchange within the area of Mixed Reality enabled human-robot co-production.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "How to visualize recorded production data in Virtual Reality? How to use state of the art Augmented Reality displays that can show robot data? This paper introduces an opensource ICT framework approach for combining Unity-based Mixed Reality applications with robotic production equipment using ROS Industrial. This publication gives details on the implementation and demonstrates the use as a data analysis tool in the context of scientific exchange within the area of Mixed Reality enabled human-robot co-production.",
"fno": "746300a043",
"keywords": [
"Augmented Reality",
"Data Analysis",
"Data Visualisation",
"Human Robot Interaction",
"Industrial Robots",
"Production Engineering Computing",
"Production Equipment",
"Public Domain Software",
"Virtual Reality",
"Augmented Reality",
"Robotic Production Equipment",
"Data Analysis Tool",
"Human Robot Co Production",
"Mirrorlabs",
"Digital Twins",
"Robotic Production Environment",
"Unity Based Mixed Reality Applications",
"Production Data Visualization",
"Open Source ICT Framework",
"ROS Industrial",
"Robots",
"Service Robots",
"Robot Sensing Systems",
"Production",
"Mixed Reality",
"Hardware",
"Software",
"Industry 4 0",
"Manufacturing",
"Augmented Reality",
"Virtual Reality",
"Human Robot Co Production",
"Human Robot Interaction"
],
"authors": [
{
"affiliation": "Industrial Design Engineering TU Delft,Delft,Netherlands",
"fullName": "Doris Aschenbrenner",
"givenName": "Doris",
"surname": "Aschenbrenner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Industrial Design Engineering TU Delft,Delft,Netherlands",
"fullName": "Jonas S.I. Rieder",
"givenName": "Jonas S.I.",
"surname": "Rieder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Industrial Design Engineering TU Delft,Delft,Netherlands",
"fullName": "Daniëlle van Tol",
"givenName": "Daniëlle",
"surname": "van Tol",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Industrial Design Engineering TU Delft,Delft,Netherlands",
"fullName": "Joris van Dam",
"givenName": "Joris",
"surname": "van Dam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Industrial Design Engineering TU Delft,Delft,Netherlands",
"fullName": "Zoltan Rusak",
"givenName": "Zoltan",
"surname": "Rusak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalto University,Electrical Engineering and Automation,Espoo,Finland",
"fullName": "Jan Olaf Blech",
"givenName": "Jan Olaf",
"surname": "Blech",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalto University,Electrical Engineering and Automation,Espoo,Finland",
"fullName": "Mohammad Azangoo",
"givenName": "Mohammad",
"surname": "Azangoo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalto University,Electrical Engineering and Automation,Espoo,Finland",
"fullName": "Salo Panu",
"givenName": "Salo",
"surname": "Panu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab University Tartu,Intelligent Materials and Systems,Tartu,Estonia",
"fullName": "Karl Kruusamäe",
"givenName": "Karl",
"surname": "Kruusamäe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab University Tartu,Intelligent Materials and Systems,Tartu,Estonia",
"fullName": "Houman Masnavi",
"givenName": "Houman",
"surname": "Masnavi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab University Tartu,Intelligent Materials and Systems,Tartu,Estonia",
"fullName": "Igor Rybalskii",
"givenName": "Igor",
"surname": "Rybalskii",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab University Tartu,Intelligent Materials and Systems,Tartu,Estonia",
"fullName": "Alvo Aabloo",
"givenName": "Alvo",
"surname": "Aabloo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CRIIS, INESC TEC - Institute for Systems and Computer Engineering, Technology and Science,Porto,Portugal",
"fullName": "Marcelo Petry",
"givenName": "Marcelo",
"surname": "Petry",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CRIIS, INESC TEC - Institute for Systems and Computer Engineering, Technology and Science,Porto,Portugal",
"fullName": "Gustavo Teixeira",
"givenName": "Gustavo",
"surname": "Teixeira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Braunschweig,Braunschweig,Germany",
"fullName": "Bastian Thiede",
"givenName": "Bastian",
"surname": "Thiede",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SUPSI,Manno,Switzerland",
"fullName": "Paolo Pedrazzoli",
"givenName": "Paolo",
"surname": "Pedrazzoli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SUPSI,Manno,Switzerland",
"fullName": "Andrea Ferrario",
"givenName": "Andrea",
"surname": "Ferrario",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SUPSI,Manno,Switzerland",
"fullName": "Michele Foletti",
"givenName": "Michele",
"surname": "Foletti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SUPSI,Manno,Switzerland",
"fullName": "Matteo Confalonieri",
"givenName": "Matteo",
"surname": "Confalonieri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SUPSI,Manno,Switzerland",
"fullName": "Daniele Bertaggia",
"givenName": "Daniele",
"surname": "Bertaggia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Patras,LMS,Patras,Greece",
"fullName": "Thodoris Togias",
"givenName": "Thodoris",
"surname": "Togias",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Patras,LMS,Patras,Greece",
"fullName": "Sotiris Makris",
"givenName": "Sotiris",
"surname": "Makris",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "43-48",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7463-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "746300a037",
"articleId": "1qpzzFIz1tK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "746300a049",
"articleId": "1qpzDl7OBkk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1992/2720/0/00220204",
"title": "Robotic assembly operation based on task-level teaching in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220204/12OmNvTTcbj",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402585",
"title": "Alice's adventures in an immersive mixed reality environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402585/12OmNyxXlsF",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/03/mcg2015030033",
"title": "Exploration of Alternative Interaction Techniques for Robotic Systems",
"doi": null,
"abstractUrl": "/magazine/cg/2015/03/mcg2015030033/13rRUwwJWBm",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2011/03/tlt2011030249",
"title": "A Mobile Mixed-Reality Environment for Children's Storytelling Using a Handheld Projector and a Robot",
"doi": null,
"abstractUrl": "/journal/lt/2011/03/tlt2011030249/13rRUxC0SIH",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2012/02/mex2012020019",
"title": "Mixed-reality testbeds for incremental development of HART applications",
"doi": null,
"abstractUrl": "/magazine/ex/2012/02/mex2012020019/13rRUxlgxZS",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2018/8161/0/08633656",
"title": "Cooperating Robots for Production and Assembly: A Technical Review",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2018/08633656/17D45VtKitk",
"parentPublication": {
"id": "proceedings/iisa/2018/8161/0",
"title": "2018 9th International Conference on Information, Intelligence, Systems and Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icict/2022/6960/0/696000a153",
"title": "Mixed reality (MR) Enabled Proprio and Teleoperation of a Humanoid Robot for Paraplegic Patients",
"doi": null,
"abstractUrl": "/proceedings-article/icict/2022/696000a153/1FJ5bdmciJO",
"parentPublication": {
"id": "proceedings/icict/2022/6960/0",
"title": "2022 5th International Conference on Information and Computer Technologies (ICICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a056",
"title": "Using mixed reality based digital twins for robotics education",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a056/1J7WkMOnPwc",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a210",
"title": "Digital Twins for Distributed Collaborative Work in Shared Production",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a210/1KmFacnGII8",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a353",
"title": "Perceptual MR Space: Interactive Toolkit for Efficient Environment Reconstruction in Mobile Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a353/1gysi7jaaKQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQCejb7Co",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00111",
"title": "The Impact of Gaze Cues in Mixed Reality Collaborations",
"normalizedTitle": "The Impact of Gaze Cues in Mixed Reality Collaborations",
"abstract": "Gaze is one of the most important communication cues in performing physical tasks in both face-to-face and remote collaboration. Dynamic gaze information can indicate the user’s intention, focus, and current attention while visualising this information can often compensate for other communication channels that are not always readily available. Previous studies have shown that sharing and understanding another person’s gaze cues can benefit mutual awareness and task coordination in traditional 2D displays. However, researchers have not fully explored the impact of the virtual representations of gaze cues using Mixed Reality technologies. In this doctoral consortium presentation, I will present eyemR-Vis, a 360 panoramic Mixed Reality (MR) remote collaboration system that shares gaze behavioural visualisations between a local worker and a remote collaborator. In the paper I discuss the PhD research motivation, background material, recently published study results, and plans for future work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gaze is one of the most important communication cues in performing physical tasks in both face-to-face and remote collaboration. Dynamic gaze information can indicate the user’s intention, focus, and current attention while visualising this information can often compensate for other communication channels that are not always readily available. Previous studies have shown that sharing and understanding another person’s gaze cues can benefit mutual awareness and task coordination in traditional 2D displays. However, researchers have not fully explored the impact of the virtual representations of gaze cues using Mixed Reality technologies. In this doctoral consortium presentation, I will present eyemR-Vis, a 360 panoramic Mixed Reality (MR) remote collaboration system that shares gaze behavioural visualisations between a local worker and a remote collaborator. In the paper I discuss the PhD research motivation, background material, recently published study results, and plans for future work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gaze is one of the most important communication cues in performing physical tasks in both face-to-face and remote collaboration. Dynamic gaze information can indicate the user’s intention, focus, and current attention while visualising this information can often compensate for other communication channels that are not always readily available. Previous studies have shown that sharing and understanding another person’s gaze cues can benefit mutual awareness and task coordination in traditional 2D displays. However, researchers have not fully explored the impact of the virtual representations of gaze cues using Mixed Reality technologies. In this doctoral consortium presentation, I will present eyemR-Vis, a 360 panoramic Mixed Reality (MR) remote collaboration system that shares gaze behavioural visualisations between a local worker and a remote collaborator. In the paper I discuss the PhD research motivation, background material, recently published study results, and plans for future work.",
"fno": "129800a473",
"keywords": [
"Data Visualisation",
"Groupware",
"Handicapped Aids",
"Virtual Reality",
"Gaze Cues",
"Mixed Reality Collaborations",
"Important Communication Cues",
"Physical Tasks",
"Dynamic Gaze Information",
"User",
"Current Attention",
"Communication Channels",
"Mutual Awareness",
"Task Coordination",
"Traditional 2 D Displays",
"Mixed Reality Technologies",
"360 Panoramic Mixed Reality Remote Collaboration System",
"Shares Gaze",
"Remote Collaborator",
"Visualization",
"Two Dimensional Displays",
"Mixed Reality",
"Collaboration",
"Medical Services",
"Communication Channels",
"Task Analysis",
"Gaze Visualisation",
"Mixed Reality Remote Collaboration"
],
"authors": [
{
"affiliation": "University of South Australia",
"fullName": "Allison Jing",
"givenName": "Allison",
"surname": "Jing",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "473-475",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129800a469",
"articleId": "1yeQY3YdfnW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a476",
"articleId": "1yeQC8OgSoU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a079",
"title": "[POSTER] Mutually Shared Gaze in Augmented Video Conference",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a079/12OmNyQYt9o",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699227",
"title": "Do You Know What I Mean? An MR-Based Collaborative Platform",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699227/19F1PhUp98k",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714124",
"title": "Video See-Through Mixed Reality with Focus Cues",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714124/1B0XWyWo5KE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a250",
"title": "Using Speech to Visualise Shared Gaze Cues in MR Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a250/1CJcnpSVomk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a837",
"title": "Comparing Gaze-Supported Modalities with Empathic Mixed Reality Interfaces in Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a837/1JrRgMzkUBq",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798024",
"title": "Head Pointer or Eye Gaze: Which Helps More in MR Remote Collaboration?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798024/1cJ0MmguvG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a393",
"title": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a393/1gysjIlsYus",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a532",
"title": "TeleGate: Immersive Multi-User Collaboration for Mixed Reality 360° Video",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a532/1tnXy7NpnGg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzcPA9q",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC8uRrd",
"doi": "10.1109/ISMAR.2017.26",
"title": "Mixed Voxel Reality: Presence and Embodiment in Low Fidelity, Visually Coherent, Mixed Reality Environments",
"normalizedTitle": "Mixed Voxel Reality: Presence and Embodiment in Low Fidelity, Visually Coherent, Mixed Reality Environments",
"abstract": "Mixed Reality aims at combining virtual reality with the user's surrounding real environment in a way that they form one, coherent reality. A coherent visual quality is of utmost importance, expressed in measures of e.g. resolution, framerate, and latency for both the real and the virtual domains. For years, researchers have focused on maximizing the quality of the virtual visualization mimicking the real world to get closer to visual coherence. This however, makes Mixed Reality systems overly complex and requires high computational power. In this paper, we propose a different approach by decreasing the realism of one or both visual realms, real and virtual, to achieve visual coherence. Our system coarsely voxelizes the real and virtual environments, objects, and people to provide a believable, coherent mixed voxel reality. In this paper we present the general idea, the current implementation and demonstrate the effectiveness of our approach by technical and empirical evaluations. Our mixed voxel reality system serves as a platform for low-cost presence research and studies on human perception and cognition, a host of diagnostic and therapeutic applications, and for a variety of Mixed Reality applications where users' embodiment is important. Our findings challenge some commonplace assumptions on more is better approaches in mixed reality research and practice-sometimes less can be more.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mixed Reality aims at combining virtual reality with the user's surrounding real environment in a way that they form one, coherent reality. A coherent visual quality is of utmost importance, expressed in measures of e.g. resolution, framerate, and latency for both the real and the virtual domains. For years, researchers have focused on maximizing the quality of the virtual visualization mimicking the real world to get closer to visual coherence. This however, makes Mixed Reality systems overly complex and requires high computational power. In this paper, we propose a different approach by decreasing the realism of one or both visual realms, real and virtual, to achieve visual coherence. Our system coarsely voxelizes the real and virtual environments, objects, and people to provide a believable, coherent mixed voxel reality. In this paper we present the general idea, the current implementation and demonstrate the effectiveness of our approach by technical and empirical evaluations. Our mixed voxel reality system serves as a platform for low-cost presence research and studies on human perception and cognition, a host of diagnostic and therapeutic applications, and for a variety of Mixed Reality applications where users' embodiment is important. Our findings challenge some commonplace assumptions on more is better approaches in mixed reality research and practice-sometimes less can be more.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mixed Reality aims at combining virtual reality with the user's surrounding real environment in a way that they form one, coherent reality. A coherent visual quality is of utmost importance, expressed in measures of e.g. resolution, framerate, and latency for both the real and the virtual domains. For years, researchers have focused on maximizing the quality of the virtual visualization mimicking the real world to get closer to visual coherence. This however, makes Mixed Reality systems overly complex and requires high computational power. In this paper, we propose a different approach by decreasing the realism of one or both visual realms, real and virtual, to achieve visual coherence. Our system coarsely voxelizes the real and virtual environments, objects, and people to provide a believable, coherent mixed voxel reality. In this paper we present the general idea, the current implementation and demonstrate the effectiveness of our approach by technical and empirical evaluations. Our mixed voxel reality system serves as a platform for low-cost presence research and studies on human perception and cognition, a host of diagnostic and therapeutic applications, and for a variety of Mixed Reality applications where users' embodiment is important. Our findings challenge some commonplace assumptions on more is better approaches in mixed reality research and practice-sometimes less can be more.",
"fno": "2943a090",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Mixed Reality Research",
"Mixed Reality Applications",
"Low Cost Presence Research",
"Mixed Voxel Reality System",
"Coherent Mixed Voxel Reality",
"Believable Voxel Reality",
"Virtual Environments",
"Real Environments",
"Visual Realms",
"High Computational Power",
"Mixed Reality Systems",
"Visual Coherence",
"Virtual Visualization",
"Virtual Domains",
"Coherent Visual Quality",
"Virtual Reality",
"Mixed Reality Environments",
"Coherent Reality Environments",
"Virtual Reality",
"Visualization",
"Cameras",
"Rendering Computer Graphics",
"Hardware",
"Coherence",
"Mixed Reality",
"Augmented Reality",
"Believability",
"Presence",
"Voxel Grid"
],
"authors": [
{
"affiliation": null,
"fullName": "Holger Regenbrecht",
"givenName": "Holger",
"surname": "Regenbrecht",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Katrin Meng",
"givenName": "Katrin",
"surname": "Meng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Arne Reepen",
"givenName": "Arne",
"surname": "Reepen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stephan Beck",
"givenName": "Stephan",
"surname": "Beck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tobias Langlotz",
"givenName": "Tobias",
"surname": "Langlotz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "90-99",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2943-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2943a082",
"articleId": "12OmNwGqBn3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2943a100",
"articleId": "12OmNwkhTku",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2010/9343/0/05643556",
"title": "Differential Instant Radiosity for mixed reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643556/12OmNAkWvti",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759615",
"title": "MRStudio: A mixed reality display system for aircraft cockpit",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759615/12OmNC3FGg8",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892266",
"title": "Mobile collaborative mixed reality for supporting scientific inquiry and visualization of earth science data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892266/12OmNCgrCZG",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issre/2014/6033/0/6033a055",
"title": "Deadlock and Temporal Properties Analysis in Mixed Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2014/6033a055/12OmNwDj17N",
"parentPublication": {
"id": "proceedings/issre/2014/6033/0",
"title": "2014 IEEE 25th International Symposium on Software Reliability Engineering (ISSRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836528",
"title": "Mixed Reality Extended TV",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836528/12OmNx7ouOs",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643594",
"title": "PoP-EYE environment: Mixed Reality using 3D Photo Collections",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643594/12OmNxXUhWZ",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a072",
"title": "Synthesis of Environment Maps for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a072/12OmNyfdON8",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131730",
"title": "Evaluating stereo vision and user tracking in mixed reality tasks!",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131730/12OmNzlUKCH",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864515",
"title": "Training Powered Wheelchair Manoeuvres in Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864515/1e5ZsFVNopG",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a508",
"title": "Neural Cameras: Learning Camera Characteristics for Coherent Mixed Reality Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a508/1yeD4rzUalO",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJbKx7Zhyo",
"doi": "10.1109/VR51125.2022.00095",
"title": "All Shook Up: The Impact of Floor Vibration in Symmetric and Asymmetric Immersive Multi-user VR Gaming Experiences",
"normalizedTitle": "All Shook Up: The Impact of Floor Vibration in Symmetric and Asymmetric Immersive Multi-user VR Gaming Experiences",
"abstract": "This paper investigates the influence of floor-vibration tactile feedback on immersed users. Under symmetric and asymmetric tactile sensory cue conditions, we explore how multi-user Virtual Reality (VR) experiences are impacted by these cues in terms of illusion and coherence. Based on the reported positive impact of tactile cues in solo VR experiences, we posit that if context-matched perceptual tactile feedback is exchanged between users, they will report a significantly enhanced VR experience compared to not receiving the sensory stimuli, even within the same immersive VR experience. With our custom-built, computer-controlled vibration floor, we implemented a cannonball shooting game for two physically-separated players. In the VR game, the two players shoot cannonballs to destroy their opponent’s protective wall and cannon, while the programmed floor platform generates vertical vibrations depending on the experimental condition. We used a mixed-factorial design with four conditions for each pair of participants: 1) both A and B had vibration, and 2) neither A nor B had vibration (the Symmetric group), or 3) A had vibration, but B did not, and 4) B had vibration, but A did not (the Asymmetric group). We collected subjective and objective data for variables previously shown to be related to levels of illusion, coherence, and usability, including Presence, Co-Presence, Social Presence, Plausibility Illusion, Engagement, Embodiment, Coherence, Gaming Performance, and Overall Preference. A total of 39 pairs of participants were involved in the study. We found statistically significant differences for the vibration conditions on Co-Presence, Social Presence, Engagement, and Coherence, and for the symmetric conditions on the Plausibility Illusion and Coherence, but only with trivial or small effect sizes. 
The results indicate that vibration provided to a pair of game players in immersive VR can significantly enhance the VR experience, but sensory symmetry does not guarantee improved gaming performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper investigates the influence of floor-vibration tactile feedback on immersed users. Under symmetric and asymmetric tactile sensory cue conditions, we explore how multi-user Virtual Reality (VR) experiences are impacted by these cues in terms of illusion and coherence. Based on the reported positive impact of tactile cues in solo VR experiences, we posit that if context-matched perceptual tactile feedback is exchanged between users, they will report a significantly enhanced VR experience compared to not receiving the sensory stimuli, even within the same immersive VR experience. With our custom-built, computer-controlled vibration floor, we implemented a cannonball shooting game for two physically-separated players. In the VR game, the two players shoot cannonballs to destroy their opponent’s protective wall and cannon, while the programmed floor platform generates vertical vibrations depending on the experimental condition. We used a mixed-factorial design with four conditions for each pair of participants: 1) both A and B had vibration, and 2) neither A nor B had vibration (the Symmetric group), or 3) A had vibration, but B did not, and 4) B had vibration, but A did not (the Asymmetric group). We collected subjective and objective data for variables previously shown to be related to levels of illusion, coherence, and usability, including Presence, Co-Presence, Social Presence, Plausibility Illusion, Engagement, Embodiment, Coherence, Gaming Performance, and Overall Preference. A total of 39 pairs of participants were involved in the study. We found statistically significant differences for the vibration conditions on Co-Presence, Social Presence, Engagement, and Coherence, and for the symmetric conditions on the Plausibility Illusion and Coherence, but only with trivial or small effect sizes. 
The results indicate that vibration provided to a pair of game players in immersive VR can significantly enhance the VR experience, but sensory symmetry does not guarantee improved gaming performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper investigates the influence of floor-vibration tactile feedback on immersed users. Under symmetric and asymmetric tactile sensory cue conditions, we explore how multi-user Virtual Reality (VR) experiences are impacted by these cues in terms of illusion and coherence. Based on the reported positive impact of tactile cues in solo VR experiences, we posit that if context-matched perceptual tactile feedback is exchanged between users, they will report a significantly enhanced VR experience compared to not receiving the sensory stimuli, even within the same immersive VR experience. With our custom-built, computer-controlled vibration floor, we implemented a cannonball shooting game for two physically-separated players. In the VR game, the two players shoot cannonballs to destroy their opponent’s protective wall and cannon, while the programmed floor platform generates vertical vibrations depending on the experimental condition. We used a mixed-factorial design with four conditions for each pair of participants: 1) both A and B had vibration, and 2) neither A nor B had vibration (the Symmetric group), or 3) A had vibration, but B did not, and 4) B had vibration, but A did not (the Asymmetric group). We collected subjective and objective data for variables previously shown to be related to levels of illusion, coherence, and usability, including Presence, Co-Presence, Social Presence, Plausibility Illusion, Engagement, Embodiment, Coherence, Gaming Performance, and Overall Preference. A total of 39 pairs of participants were involved in the study. We found statistically significant differences for the vibration conditions on Co-Presence, Social Presence, Engagement, and Coherence, and for the symmetric conditions on the Plausibility Illusion and Coherence, but only with trivial or small effect sizes. 
The results indicate that vibration provided to a pair of game players in immersive VR can significantly enhance the VR experience, but sensory symmetry does not guarantee improved gaming performance.",
"fno": "961700a737",
"keywords": [
"Computer Games",
"Haptic Interfaces",
"Vibrations",
"Virtual Reality",
"Plausibility Illusion",
"Vibration Conditions",
"Social Presence",
"Symmetric Conditions",
"Game Players",
"Improved Gaming Performance",
"Floor Vibration",
"Asymmetric Immersive Multiuser VR Gaming Experiences",
"Floor Vibration Tactile Feedback",
"Immersed Users",
"Asymmetric Tactile Sensory Cue Conditions",
"Multiuser Virtual Reality Experiences",
"Reported Positive Impact",
"Tactile Cues",
"Solo VR Experiences",
"Context Matched Perceptual Tactile Feedback",
"Enhanced VR Experience",
"Sensory Stimuli",
"Immersive VR Experience",
"Computer Controlled Vibration Floor",
"Cannonball Shooting Game",
"Physically Separated Players",
"VR Game",
"Programmed Floor Platform",
"Vertical Vibrations",
"Asymmetric Group",
"Vibrations",
"Three Dimensional Displays",
"Conferences",
"Tactile Sensors",
"Games",
"Coherence",
"Virtual Reality",
"Floor Vibration",
"Whole Body Tactile",
"Tactile",
"Vibration",
"VR Game",
"Shared VR",
"Multiuser VR",
"Competition Game",
"Symmetric",
"Asymmetric"
],
"authors": [
{
"affiliation": "Kennesaw State University,Game Design and Development",
"fullName": "Sungchul Jung",
"givenName": "Sungchul",
"surname": "Jung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,HIT Lab NZ",
"fullName": "Yuanjie Wu",
"givenName": "Yuanjie",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,HIT Lab NZ",
"fullName": "Ryan McKee",
"givenName": "Ryan",
"surname": "McKee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury,HIT Lab NZ",
"fullName": "Robert W. Lindeman",
"givenName": "Robert W.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "737-745",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJbJLTtIAM",
"name": "pvr202296170-09756737s1-mm_961700a737.zip",
"size": "155 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756737s1-mm_961700a737.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "961700a728",
"articleId": "1CJc8kd55YY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a746",
"articleId": "1CJcc750PQI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isvri/2011/0054/0/05759663",
"title": "Interactive Mobile Augmented Reality system using a vibro-tactile pad",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759663/12OmNwpoFGV",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/01/07234937",
"title": "Rich Pinch: Perception of Object Movement with Tactile Illusion",
"doi": null,
"abstractUrl": "/journal/th/2016/01/07234937/13rRUEgarnR",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2018/5710/0/08524807",
"title": "Design of Digital Gloves with Feedback for VR",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2018/08524807/17D45WYQJay",
"parentPublication": {
"id": "proceedings/ewdts/2018/5710/0",
"title": "2018 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a778",
"title": "A Skin Pressure-type Grasping Device to Reproduce Impulse Force for Virtual Ball Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a778/1CJd0e2j2jm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798033",
"title": "The Effects of Tactile Gestalt on Generating Velvet Hand Illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798033/1cJ0LPy4Yb6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797832",
"title": "Tendon Vibration Increases Vision-induced Kinesthetic Illusions in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797832/1cJ0Z4AOHN6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a350",
"title": "Investigating Remote Tactile Feedback for Mid-Air Text-Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a350/1pysyvL4CwU",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09386008",
"title": "Floor-vibration VR: Mitigating Cybersickness Using Whole-body Tactile Stimuli in Highly Realistic Vehicle Driving Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09386008/1seiz94oUco",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2020/7397/0/739700a456",
"title": "Development of Touch Valve UI with pseudo-haptics feedback based on vibration of tablet PC",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2020/739700a456/1tGcjlaxzMs",
"parentPublication": {
"id": "proceedings/iiai-aai/2020/7397/0",
"title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2020/9594/0/09450267",
"title": "A Cognitive Model of the Tactile Vibration Sense and Experiments on a Touch Simulation System",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2020/09450267/1uqFPR9ICaI",
"parentPublication": {
"id": "proceedings/icci*cc/2020/9594/0",
"title": "2020 IEEE 19th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrQTvCTbhK",
"doi": "10.1109/ISMAR55827.2022.00065",
"title": "Plausibility and Perception of Personalized Virtual Humans between Virtual and Augmented Reality",
"normalizedTitle": "Plausibility and Perception of Personalized Virtual Humans between Virtual and Augmented Reality",
"abstract": "This article investigates the effects of different XR displays on the perception and plausibility of personalized virtual humans. We compared immersive virtual reality (VR), video see-through augmented reality (VST AR), and optical see-through AR (OST AR). The personalized virtual alter egos were generated by state-of-the-art photogrammetry methods. 42 participants were repeatedly exposed to animated versions of their 3D-reconstructed virtual alter egos in each of the three XR display conditions. The reconstructed virtual alter egos were additionally modified in body weight for each repetition. We show that the display types lead to different degrees of incongruence between the renderings of the virtual humans and the presentation of the respective environmental backgrounds, leading to significant effects of perceived mismatches as part of a plausibility measurement. The device-related effects were further partly confirmed by subjective misestimations of the modified body weight and the measured spatial presence. Here, the exceedingly incongruent OST AR condition leads to the significantly highest weight misestimations as well as to the lowest perceived spatial presence. However, similar effects could not be confirmed for the affective appraisal (i.e., humanness, eeriness, or attractiveness) of the virtual humans, giving rise to the assumption that these factors might be unrelated to each other.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This article investigates the effects of different XR displays on the perception and plausibility of personalized virtual humans. We compared immersive virtual reality (VR), video see-through augmented reality (VST AR), and optical see-through AR (OST AR). The personalized virtual alter egos were generated by state-of-the-art photogrammetry methods. 42 participants were repeatedly exposed to animated versions of their 3D-reconstructed virtual alter egos in each of the three XR display conditions. The reconstructed virtual alter egos were additionally modified in body weight for each repetition. We show that the display types lead to different degrees of incongruence between the renderings of the virtual humans and the presentation of the respective environmental backgrounds, leading to significant effects of perceived mismatches as part of a plausibility measurement. The device-related effects were further partly confirmed by subjective misestimations of the modified body weight and the measured spatial presence. Here, the exceedingly incongruent OST AR condition leads to the significantly highest weight misestimations as well as to the lowest perceived spatial presence. However, similar effects could not be confirmed for the affective appraisal (i.e., humanness, eeriness, or attractiveness) of the virtual humans, giving rise to the assumption that these factors might be unrelated to each other.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This article investigates the effects of different XR displays on the perception and plausibility of personalized virtual humans. We compared immersive virtual reality (VR), video see-through augmented reality (VST AR), and optical see-through AR (OST AR). The personalized virtual alter egos were generated by state-of-the-art photogrammetry methods. 42 participants were repeatedly exposed to animated versions of their 3D-reconstructed virtual alter egos in each of the three XR display conditions. The reconstructed virtual alter egos were additionally modified in body weight for each repetition. We show that the display types lead to different degrees of incongruence between the renderings of the virtual humans and the presentation of the respective environmental backgrounds, leading to significant effects of perceived mismatches as part of a plausibility measurement. The device-related effects were further partly confirmed by subjective misestimations of the modified body weight and the measured spatial presence. Here, the exceedingly incongruent OST AR condition leads to the significantly highest weight misestimations as well as to the lowest perceived spatial presence. However, similar effects could not be confirmed for the affective appraisal (i.e., humanness, eeriness, or attractiveness) of the virtual humans, giving rise to the assumption that these factors might be unrelated to each other.",
"fno": "532500a489",
"keywords": [
"Augmented Reality",
"Computer Animation",
"Human Factors",
"Image Reconstruction",
"Photogrammetry",
"Virtual Reality",
"3 D Reconstructed Virtual Alter Egos",
"Augmented Reality",
"Device Related Effects",
"Different XR Displays",
"Display Types",
"Exceedingly Incongruent OST AR Condition",
"Humanness",
"Immersive Virtual Reality",
"Modified Body Weight",
"Personalized Virtual Alter Egos",
"Personalized Virtual Humans",
"Plausibility Measurement",
"State Of The Art Photogrammetry Methods",
"XR Display Conditions",
"Weight Measurement",
"Rendering Computer Graphics",
"Optical Imaging",
"Appraisal",
"X Reality",
"Image Reconstruction",
"Mixed Reality",
"Immersion",
"Coherence",
"Presence",
"Body Weight Perception",
"Body Image",
"Serious Application",
"Uncanny Valley Human Centered Computing",
"Empirical Studies In HCI",
"Mixed Augmented Reality",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "Erik Wolf",
"givenName": "Erik",
"surname": "Wolf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "David Mal",
"givenName": "David",
"surname": "Mal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "Viktor Frohnapfel",
"givenName": "Viktor",
"surname": "Frohnapfel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,PIIS Group",
"fullName": "Nina Döllinger",
"givenName": "Nina",
"surname": "Döllinger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Dortmund University,Computer Graphics Group",
"fullName": "Stephan Wenninger",
"givenName": "Stephan",
"surname": "Wenninger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Dortmund University,Computer Graphics Group",
"fullName": "Mario Botsch",
"givenName": "Mario",
"surname": "Botsch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "Marc Erich Latoschik",
"givenName": "Marc Erich",
"surname": "Latoschik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Würzburg,PIIS Group",
"fullName": "Carolin Wienrich",
"givenName": "Carolin",
"surname": "Wienrich",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "489-498",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a479",
"articleId": "1JrR13pPxBK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a499",
"articleId": "1JrR9zO8aHK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2022/05/09714117",
"title": "Breaking Plausibility Without Breaking Presence - Evidence For The Multi-Layer Nature Of Plausibility",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714117/1B0XXIKCoWA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a796",
"title": "A Replication Study to Measure the Perceived Three-Dimensional Location of Virtual Objects in Optical See Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a796/1CJfrSkdYUE",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2021/5841/0/584100b291",
"title": "A High-Performance 5G/6G Infrastructure for Augmented, Virtual, and Extended Reality",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2021/584100b291/1EpLpRFjlzq",
"parentPublication": {
"id": "proceedings/csci/2021/5841/0",
"title": "2021 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049665",
"title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049665/1KYooqYQbF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049710",
"title": "Exploring Plausibility and Presence in Mixed Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049710/1KYoplRZLWM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a104",
"title": "XR Management Training Simulator supported by Content-Based scenario recommendation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a104/1KmF8tEedk4",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a448",
"title": "Anon-Emoji: An Optical See-Through Augmented Reality System for Children with Autism Spectrum Disorders to promote Understanding of Facial Expressions and Emotions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a448/1gyskMDWf4I",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a080",
"title": "Can Retinal Projection Displays Improve Spatial Perception in Augmented Reality?",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a080/1pysvYTZF6w",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09516987",
"title": "Side-by-Side Comparison of Human Perception and Performance Using Augmented, Hybrid, and Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09516987/1watWKQVFM4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwB2dUd",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBKEymO",
"doi": "10.1109/3DUI.2016.7460032",
"title": "Automated path prediction for redirected walking using navigation meshes",
"normalizedTitle": "Automated path prediction for redirected walking using navigation meshes",
"abstract": "Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. Effectiveness of redirection algorithms can significantly improve when a reliable prediction of the user's future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environment's structure enables simplified deployment of these algorithms in any virtual environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. Effectiveness of redirection algorithms can significantly improve when a reliable prediction of the user's future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environment's structure enables simplified deployment of these algorithms in any virtual environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. Effectiveness of redirection algorithms can significantly improve when a reliable prediction of the user's future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environment's structure enables simplified deployment of these algorithms in any virtual environment.",
"fno": "07460032",
"keywords": [
"Prediction Algorithms",
"Navigation",
"Virtual Environments",
"Heuristic Algorithms",
"Trajectory",
"Legged Locomotion",
"Space Exploration",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques"
],
"authors": [
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Mahdi Azmandian",
"givenName": "Mahdi",
"surname": "Azmandian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Timofey Grechkin",
"givenName": "Timofey",
"surname": "Grechkin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Mark Bolas",
"givenName": "Mark",
"surname": "Bolas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Evan Suma",
"givenName": "Evan",
"surname": "Suma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "63-66",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0842-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07460031",
"articleId": "12OmNxw5Bha",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07460033",
"articleId": "12OmNqI04GH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549395",
"title": "Flexible and general redirected walking for head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020044",
"title": "15 Years of Research on Redirected Walking in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020044/13rRUxcsYOr",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715721",
"title": "Validating Simulation-Based Evaluation of Redirected Walking Systems",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715721/1B4hxt06P9m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798319",
"title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523832",
"title": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523832/1wpqjiNuSqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzaQowA",
"doi": "10.1109/VR.2016.7504714",
"title": "A realistic walking model for enhancing redirection in virtual reality",
"normalizedTitle": "A realistic walking model for enhancing redirection in virtual reality",
"abstract": "Redirected walking algorithms require the prediction of human motion in order to effectively steer users away from the boundaries of the physical space. While a virtual walking trajectory may be represented using straight lines connecting waypoints of interest, this simple model does not accurately represent typical user behavior. In this poster we present a more realistic walking model for use in real-time virtual environments that employ redirection techniques. We implemented the model within a framework that can be used for simulation of redirected walking within different virtual and physical environments. Such simulations are useful for the evaluation of redirected walking algorithms and the tuning of parameters under varying conditions. Additionally, the model can also be used to animate an artificial humanoid “ghost walker” to provide a visual demonstration of redirected walking in virtual reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking algorithms require the prediction of human motion in order to effectively steer users away from the boundaries of the physical space. While a virtual walking trajectory may be represented using straight lines connecting waypoints of interest, this simple model does not accurately represent typical user behavior. In this poster we present a more realistic walking model for use in real-time virtual environments that employ redirection techniques. We implemented the model within a framework that can be used for simulation of redirected walking within different virtual and physical environments. Such simulations are useful for the evaluation of redirected walking algorithms and the tuning of parameters under varying conditions. Additionally, the model can also be used to animate an artificial humanoid “ghost walker” to provide a visual demonstration of redirected walking in virtual reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking algorithms require the prediction of human motion in order to effectively steer users away from the boundaries of the physical space. While a virtual walking trajectory may be represented using straight lines connecting waypoints of interest, this simple model does not accurately represent typical user behavior. In this poster we present a more realistic walking model for use in real-time virtual environments that employ redirection techniques. We implemented the model within a framework that can be used for simulation of redirected walking within different virtual and physical environments. Such simulations are useful for the evaluation of redirected walking algorithms and the tuning of parameters under varying conditions. Additionally, the model can also be used to animate an artificial humanoid “ghost walker” to provide a visual demonstration of redirected walking in virtual reality.",
"fno": "07504714",
"keywords": [
"Legged Locomotion",
"Solid Modeling",
"Biological System Modeling",
"Trajectory",
"Computational Modeling",
"Virtual Reality",
"Prediction Algorithms",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Occidental College",
"fullName": "Courtney Hutton",
"givenName": "Courtney",
"surname": "Hutton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Evan Suma",
"givenName": "Evan",
"surname": "Suma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "183-184",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504713",
"articleId": "12OmNAS9zzO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504715",
"articleId": "12OmNxu6p8R",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549412",
"title": "Estimation of detection thresholds for acoustic based redirected walking techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448288",
"title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020044",
"title": "15 Years of Research on Redirected Walking in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020044/13rRUxcsYOr",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715723",
"title": "Adaptive Redirection: A Context-Aware Redirected Walking Meta-Strategy",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715723/1B4hxCQXB4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a524",
"title": "The Chaotic Behavior of Redirection – Revisiting Simulations in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a524/1CJc4FECUko",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798231",
"title": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798231/1cJ0KBrAUYE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859537/1h0Jm3Gvypy",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwB2dUd",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz4SOsF",
"doi": "10.1109/3DUI.2016.7460030",
"title": "Eye tracking for locomotion prediction in redirected walking",
"normalizedTitle": "Eye tracking for locomotion prediction in redirected walking",
"abstract": "Model predictive control was shown to be a powerful tool for Redirected Walking when used to plan and select future redirection techniques. However, to use it effectively, a good prediction of the user's future actions is crucial. Traditionally, this prediction is made based on the user's position or current direction of movement. In the area of cognitive sciences however, it was shown that a person's gaze can also be highly indicative of his intention in both selection and navigation tasks. In this paper, this effect is used for the first time to predict a user's locomotion target during goal-directed locomotion in an immersive virtual environment. After discussing the general implications and challenges of using eye tracking for prediction in a locomotion context, we propose a prediction method for a user's intended locomotion target. This approach is then compared with position based approaches in terms of prediction time and accuracy based on data gathered in an experiment. The results show that, in certain situations, eye tracking allows an earlier prediction compared to approaches currently used for redirected walking. However, other recently published prediction methods that are based on the user's position perform almost as well as the eye tracking based approaches presented in this paper.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Model predictive control was shown to be a powerful tool for Redirected Walking when used to plan and select future redirection techniques. However, to use it effectively, a good prediction of the user's future actions is crucial. Traditionally, this prediction is made based on the user's position or current direction of movement. In the area of cognitive sciences however, it was shown that a person's gaze can also be highly indicative of his intention in both selection and navigation tasks. In this paper, this effect is used for the first time to predict a user's locomotion target during goal-directed locomotion in an immersive virtual environment. After discussing the general implications and challenges of using eye tracking for prediction in a locomotion context, we propose a prediction method for a user's intended locomotion target. This approach is then compared with position based approaches in terms of prediction time and accuracy based on data gathered in an experiment. The results show that, in certain situations, eye tracking allows an earlier prediction compared to approaches currently used for redirected walking. However, other recently published prediction methods that are based on the user's position perform almost as well as the eye tracking based approaches presented in this paper.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Model predictive control was shown to be a powerful tool for Redirected Walking when used to plan and select future redirection techniques. However, to use it effectively, a good prediction of the user's future actions is crucial. Traditionally, this prediction is made based on the user's position or current direction of movement. In the area of cognitive sciences however, it was shown that a person's gaze can also be highly indicative of his intention in both selection and navigation tasks. In this paper, this effect is used for the first time to predict a user's locomotion target during goal-directed locomotion in an immersive virtual environment. After discussing the general implications and challenges of using eye tracking for prediction in a locomotion context, we propose a prediction method for a user's intended locomotion target. This approach is then compared with position based approaches in terms of prediction time and accuracy based on data gathered in an experiment. The results show that, in certain situations, eye tracking allows an earlier prediction compared to approaches currently used for redirected walking. However, other recently published prediction methods that are based on the user's position perform almost as well as the eye tracking based approaches presented in this paper.",
"fno": "07460030",
"keywords": [
"Legged Locomotion",
"Gaze Tracking",
"Virtual Environments",
"Context",
"Navigation",
"Predictive Control",
"Virtual Reality",
"Tracking",
"Locomotion",
"Eye Tracking",
"Prediction",
"Redirected Walking"
],
"authors": [
{
"affiliation": "Innovation Center Virtual Reality - IWF - ETH Zurich",
"fullName": "Markus Zank",
"givenName": "Markus",
"surname": "Zank",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Innovation Center Virtual Reality - IWF - ETH Zurich",
"fullName": "Andreas Kunz",
"givenName": "Andreas",
"surname": "Kunz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "49-58",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0842-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07460029",
"articleId": "12OmNy2Jt05",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07460031",
"articleId": "12OmNxw5Bha",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2015/9403/0/9403a229",
"title": "Using Locomotion Models for Estimating Walking Targets in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a229/12OmNB7LvFe",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460032",
"title": "Automated path prediction for redirected walking using navigation meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549395",
"title": "Flexible and general redirected walking for head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715721",
"title": "Validating Simulation-Based Evaluation of Redirected Walking Systems",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715721/1B4hxt06P9m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a493",
"title": "Eye Tracking-based LSTM for Locomotion Prediction in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a493/1CJcrKWnUtO",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a727",
"title": "Analyzing Visual Perception and Predicting Locomotion using Virtual Reality and Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a727/1tnWx1pSNpK",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJfaCP53nq",
"doi": "10.1109/VRW55335.2022.00312",
"title": "Robust Redirected Walking in the Wild",
"normalizedTitle": "Robust Redirected Walking in the Wild",
"abstract": "Locomotion is a fundamental component of experiences in virtual reality (VR). However, locomotion in VR is often difficult because the layouts of the physical and virtual environments are often different, which may cause unobstructed paths in the virtual world to correspond to obstructed paths in the physical world. Thus, in order to deliver a comfortable and immersive virtual experience to users, it is important that the user can explore the virtual world using techniques that help them avoid collisions with unseen physical objects. Redirected walking (RDW) is one such technique that enables collision-free locomotion in VR using real walking. Although RDW shows promise as an effective locomotion interface, it has seen relatively little adoption in the consumer market due to the difficulty in deploying effective RDW algorithms that are robust to different environment layouts and different users’ perceptual thresholds. For my thesis, I am focused on developing RDW methods that are capable of enabling collision-free locomotion in arbitrary physical and virtual environments for a wide range of users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Locomotion is a fundamental component of experiences in virtual reality (VR). However, locomotion in VR is often difficult because the layouts of the physical and virtual environments are often different, which may cause unobstructed paths in the virtual world to correspond to obstructed paths in the physical world. Thus, in order to deliver a comfortable and immersive virtual experience to users, it is important that the user can explore the virtual world using techniques that help them avoid collisions with unseen physical objects. Redirected walking (RDW) is one such technique that enables collision-free locomotion in VR using real walking. Although RDW shows promise as an effective locomotion interface, it has seen relatively little adoption in the consumer market due to the difficulty in deploying effective RDW algorithms that are robust to different environment layouts and different users’ perceptual thresholds. For my thesis, I am focused on developing RDW methods that are capable of enabling collision-free locomotion in arbitrary physical and virtual environments for a wide range of users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Locomotion is a fundamental component of experiences in virtual reality (VR). However, locomotion in VR is often difficult because the layouts of the physical and virtual environments are often different, which may cause unobstructed paths in the virtual world to correspond to obstructed paths in the physical world. Thus, in order to deliver a comfortable and immersive virtual experience to users, it is important that the user can explore the virtual world using techniques that help them avoid collisions with unseen physical objects. Redirected walking (RDW) is one such technique that enables collision-free locomotion in VR using real walking. Although RDW shows promise as an effective locomotion interface, it has seen relatively little adoption in the consumer market due to the difficulty in deploying effective RDW algorithms that are robust to different environment layouts and different users’ perceptual thresholds. For my thesis, I am focused on developing RDW methods that are capable of enabling collision-free locomotion in arbitrary physical and virtual environments for a wide range of users.",
"fno": "840200a922",
"keywords": [
"Collision Avoidance",
"Virtual Reality",
"Comfortable Experience",
"Immersive Virtual Experience",
"Virtual World",
"Unseen Physical Objects",
"Collision Free Locomotion",
"VR",
"Locomotion Interface",
"RDW Algorithms",
"Environment Layouts",
"Arbitrary Physical Environments",
"Virtual Environments",
"Robust Redirected Walking",
"Virtual Reality",
"Unobstructed Paths",
"Obstructed Paths",
"Physical World",
"Legged Locomotion",
"Robot Motion",
"Solid Modeling",
"Three Dimensional Displays",
"Heuristic Algorithms",
"Conferences",
"Layout",
"Locomotion—Redirected Walking—Natural Walking—Motion Planning"
],
"authors": [
{
"affiliation": "University of Maryland,College Park",
"fullName": "Niall L. Williams",
"givenName": "Niall L.",
"surname": "Williams",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "922-923",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a920",
"articleId": "1CJettpbljW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a924",
"articleId": "1CJf2pIEthe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504742",
"title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446563",
"title": "Redirected Walking in Irregularly Shaped Physical Environments with Dynamic Obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446563/13bd1eW2l9A",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446167",
"title": "Redirected Spaces: Going Beyond Borders",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446167/13bd1fph1xv",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446587",
"title": "Do Textures and Global Illumination Influence the Perception of Redirected Walking Based on Translational Gain?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446587/13bd1gJ1v0m",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09785918",
"title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049692",
"title": "FREE-RDW: A Multi-user Redirected Walking Method for Supporting Non-forward Steps",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049692/1KYopXwY5Vu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10058042",
"title": "Multi-User Redirected Walking in Separate Physical Spaces for Online VR Scenarios",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10058042/1LbFn8YmYjC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": null,
"article": {
"id": "12OmNrYlmHF",
"doi": "10.1109/3DV.2014.65",
"title": "Building Modeling through Enclosure Reasoning",
"normalizedTitle": "Building Modeling through Enclosure Reasoning",
"abstract": "This paper introduces a method for automatically transforming a point cloud from a laser scanner into a volumetric 3D building model based on the new concept of enclosure reasoning. Rather than simply classifying and modeling building surfaces independently or with pairwise contextual relationships, this work introduces room, floor and building level reasoning. Enclosure reasoning premises that rooms are cycles of walls enclosing free interior space. These cycles should be of minimum description length (MDL) and obey the statistical priors expected for rooms. Floors and buildings then contain the best coverage of the most likely rooms. This allows the pipeline to generate higher fidelity models by performing modeling and recognition jointly over the entire building at once. The complete pipeline takes raw, registered laser scan surveys of a single building. It extracts the most likely smooth architectural surfaces, locates the building, and generates wall hypotheses. The algorithm then optimizes the model by growing, merging, and pruning these hypotheses to generate the most likely rooms, floors, and building in the presence of significant clutter.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a method for automatically transforming a point cloud from a laser scanner into a volumetric 3D building model based on the new concept of enclosure reasoning. Rather than simply classifying and modeling building surfaces independently or with pairwise contextual relationships, this work introduces room, floor and building level reasoning. Enclosure reasoning premises that rooms are cycles of walls enclosing free interior space. These cycles should be of minimum description length (MDL) and obey the statistical priors expected for rooms. Floors and buildings then contain the best coverage of the most likely rooms. This allows the pipeline to generate higher fidelity models by performing modeling and recognition jointly over the entire building at once. The complete pipeline takes raw, registered laser scan surveys of a single building. It extracts the most likely smooth architectural surfaces, locates the building, and generates wall hypotheses. The algorithm then optimizes the model by growing, merging, and pruning these hypotheses to generate the most likely rooms, floors, and building in the presence of significant clutter.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a method for automatically transforming a point cloud from a laser scanner into a volumetric 3D building model based on the new concept of enclosure reasoning. Rather than simply classifying and modeling building surfaces independently or with pairwise contextual relationships, this work introduces room, floor and building level reasoning. Enclosure reasoning premises that rooms are cycles of walls enclosing free interior space. These cycles should be of minimum description length (MDL) and obey the statistical priors expected for rooms. Floors and buildings then contain the best coverage of the most likely rooms. This allows the pipeline to generate higher fidelity models by performing modeling and recognition jointly over the entire building at once. The complete pipeline takes raw, registered laser scan surveys of a single building. It extracts the most likely smooth architectural surfaces, locates the building, and generates wall hypotheses. The algorithm then optimizes the model by growing, merging, and pruning these hypotheses to generate the most likely rooms, floors, and building in the presence of significant clutter.",
"fno": "7000b118",
"keywords": [
"Buildings",
"Three Dimensional Displays",
"Pipelines",
"Cognition",
"Surface Treatment",
"Shape",
"Surface Reconstruction",
"BIM",
"Point Clouds",
"Reverse Engineering"
],
"authors": [
{
"affiliation": null,
"fullName": "Adam Stambler",
"givenName": "Adam",
"surname": "Stambler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel Huber",
"givenName": "Daniel",
"surname": "Huber",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "118-125",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-7000-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7000b111",
"articleId": "12OmNBOCWhb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7000b129",
"articleId": "12OmNC8uRBp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iceet/2009/3819/3/3819c070",
"title": "FDS Research on Smoke Control in the Stair Enclosure of a High-Rise Building Fire Event",
"doi": null,
"abstractUrl": "/proceedings-article/iceet/2009/3819c070/12OmNBSjITF",
"parentPublication": {
"id": "proceedings/iceet/2009/3819/3",
"title": "Energy and Environment Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a362",
"title": "Automatic Indoor 3D Surface Reconstruction with Segmented Building and Object Elements",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a362/12OmNqHItNn",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/2/7000b103",
"title": "Interactive Mapping of Indoor Building Structures through Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000b103/12OmNyGtjhU",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363363",
"title": "A Rule-Based Ontology Reasoning System for Context-Aware Building Energy Management",
"doi": null,
"abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363363/12OmNyv7lZN",
"parentPublication": {
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0",
"title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/03/07346513",
"title": "Layer-Wise Floorplan Extraction for Automatic Urban Building Reconstruction",
"doi": null,
"abstractUrl": "/journal/tg/2016/03/07346513/13rRUIM2VH3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2022/6390/0/09904419",
"title": "A Reasoning Engine Architecture for Building Energy Metadata Management",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2022/09904419/1H5KmEUOKAw",
"parentPublication": {
"id": "proceedings/iisa/2022/6390/0",
"title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2022/6390/0/09904364",
"title": "A web-based Building Automation and Control Service",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2022/09904364/1H5KxiX1lNC",
"parentPublication": {
"id": "proceedings/iisa/2022/6390/0",
"title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a620",
"title": "Immaterial Architecture: Understanding Visualization Through the Lifecycle of a Building",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a620/1rSRamQHMJO",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a277",
"title": "Entity Resolution of Japanese Apartment Property Information Using Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a277/1xPsmDGqtBC",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2021/0898/0/089800b263",
"title": "Interactive Explainable Case-Based Reasoning for Behavior Modelling in Videogames",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2021/089800b263/1zw5W0x5JCM",
"parentPublication": {
"id": "proceedings/ictai/2021/0898/0",
"title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwB2dWA",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"acronym": "cdciem",
"groupId": "1800337",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvUaNqF",
"doi": "10.1109/CDCIEM.2011.368",
"title": "Research on Estimation of Trees Crown Volume by 3D Laser Scanning System",
"normalizedTitle": "Research on Estimation of Trees Crown Volume by 3D Laser Scanning System",
"abstract": "This paper aimed to estimate the crown volume by precise 3D laser scan system. Experiment was carried out to scan 13 different trees to get crown points. In processing the 3D laser scan data, crown volume modeling by multi-face would be greater than the actual volume and it would be hard to model the depression surface of crown volume points. The paper proposed a method to model crown volume by perspective density, which was more scientific and accurate than the way to simulate the crown like truncated cone. Introducing perspective density to estimate the crown volume made the method much easier. Through scanned crown volume of some coniferous and broadleaf trees, crown volume of coniferous was less than the broad-leaved crown when they get approximate shape.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper aimed to estimate the crown volume by precise 3D laser scan system. Experiment was carried out to scan 13 different trees to get crown points. In processing the 3D laser scan data, crown volume modeling by multi-face would be greater than the actual volume and it would be hard to model the depression surface of crown volume points. The paper proposed a method to model crown volume by perspective density, which was more scientific and accurate than the way to simulate the crown like truncated cone. Introducing perspective density to estimate the crown volume made the method much easier. Through scanned crown volume of some coniferous and broadleaf trees, crown volume of coniferous was less than the broad-leaved crown when they get approximate shape.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper aimed to estimate the crown volume by precise 3D laser scan system. Experiment was carried out to scan 13 different trees to get crown points. In processing the 3D laser scan data, crown volume modeling by multi-face would be greater than the actual volume and it would be hard to model the depression surface of crown volume points. The paper proposed a method to model crown volume by perspective density, which was more scientific and accurate than the way to simulate the crown like truncated cone. Introducing perspective density to estimate the crown volume made the method much easier. Through scanned crown volume of some coniferous and broadleaf trees, crown volume of coniferous was less than the broad-leaved crown when they get approximate shape.",
"fno": "4350a265",
"keywords": [
"Crown Volume",
"3 D Laser Scanning",
"Model",
"Perspective Density",
"Spatial Distribution"
],
"authors": [
{
"affiliation": null,
"fullName": "Wu Lulu",
"givenName": "Wu",
"surname": "Lulu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wu Bin",
"givenName": "Wu",
"surname": "Bin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cdciem",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-02-01T00:00:00",
"pubType": "proceedings",
"pages": "265-268",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4350-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4350a261",
"articleId": "12OmNwAKCQf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4350a269",
"articleId": "12OmNyywxAd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2009/3804/4/3804e668",
"title": "Study on Scanning Pattern during Laser Metal Deposition Shaping",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804e668/12OmNBBQZni",
"parentPublication": {
"id": "proceedings/icicta/2009/3804/4",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/2/3336d012",
"title": "Research on a New Kind of Adaptive Parallel Scan Method in Laser Metal Deposition Shaping",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336d012/12OmNBEGYLh",
"parentPublication": {
"id": "proceedings/csse/2008/3336/6",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2013/2322/0/2322a043",
"title": "Laser Sheet Scanning Based Smoke Acquisition and Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2013/2322a043/12OmNqJZgBe",
"parentPublication": {
"id": "proceedings/icvrv/2013/2322/0",
"title": "2013 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/1/3962a775",
"title": "Calibration Technology of Palmer Scanning Airborne Lidar with Vector Measurements",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962a775/12OmNwE9OTS",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/1",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2011/4455/0/4455a207",
"title": "Application of 3D Laser Scanning Technique in the Conservation of Geotechnical Cultural Relics in China",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2011/4455a207/12OmNx8fihm",
"parentPublication": {
"id": "proceedings/icdma/2011/4455/0",
"title": "2011 Second International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2009/3888/3/3888c206",
"title": "Research on Three-Dimensional Point Clouds Processing for Standing Tree Volume Based on Laser Scanner",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2009/3888c206/12OmNyFCvV2",
"parentPublication": {
"id": "proceedings/kam/2009/3888/1",
"title": "Knowledge Acquisition and Modeling, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130229",
"title": "A mobile structured light system for food volume estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130229/12OmNyRg4uD",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/1998/9180/0/91800063",
"title": "Edge Preservation in Volume Rendering Using Splatting",
"doi": null,
"abstractUrl": "/proceedings-article/vv/1998/91800063/12OmNzJbQVc",
"parentPublication": {
"id": "proceedings/vv/1998/9180/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06784056",
"title": "Data-Driven Synthetic Modeling of Trees",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06784056/13rRUx0xPIK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061405",
"title": "Volumetric Modeling in Laser BPH Therapy Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061405/13rRUxC0SEd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy2agSz",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvsDHIB",
"doi": "10.1109/CVPRW.2015.7301298",
"title": "Automation of dormant pruning in specialty crop production: An adaptive framework for automatic reconstruction and modeling of apple trees",
"normalizedTitle": "Automation of dormant pruning in specialty crop production: An adaptive framework for automatic reconstruction and modeling of apple trees",
"abstract": "Dormant pruning is one of the most costly and labor-intensive operations in specialty crop production. During winter, a large crew of trained seasonal workers has to carefully remove the branches from hundreds of trees using a set of pre-defined rules. The goal of automatic pruning is to reduce this dependence on a large workforce that is currently needed for the job. Automatically applying the pruning “rules” entails construction of 3D models of the trees in their dormant condition (that is, without foliage) and accurate estimation of the pruning points on the branches. This paper investigates the use of Skeleton-based Geometric (SbG) features in a 3D reconstruction scheme. The results obtained demonstrate the effectiveness of the SbG features for automatic reconstruction using only two views - the front and the back. Our results show that our proposed scheme locates the pruning points on the tree branches with an accuracy of 96.0%. The algorithm that locates the pruning points is based on a new adaptive circle-based-layer-aware modeling scheme for the trunks and the primary branches “PBs” of the trees. Its three main steps are detection, segmentation, and modeling. Localization of the pruning points on the tree branches is a part of the modeling step. Both qualitative and quantitative evaluation are performed on a new challenging apple-trees dataset that is collected for the purpose of evaluating our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dormant pruning is one of the most costly and labor-intensive operations in specialty crop production. During winter, a large crew of trained seasonal workers has to carefully remove the branches from hundreds of trees using a set of pre-defined rules. The goal of automatic pruning is to reduce this dependence on a large workforce that is currently needed for the job. Automatically applying the pruning “rules” entails construction of 3D models of the trees in their dormant condition (that is, without foliage) and accurate estimation of the pruning points on the branches. This paper investigates the use of Skeleton-based Geometric (SbG) features in a 3D reconstruction scheme. The results obtained demonstrate the effectiveness of the SbG features for automatic reconstruction using only two views - the front and the back. Our results show that our proposed scheme locates the pruning points on the tree branches with an accuracy of 96.0%. The algorithm that locates the pruning points is based on a new adaptive circle-based-layer-aware modeling scheme for the trunks and the primary branches “PBs” of the trees. Its three main steps are detection, segmentation, and modeling. Localization of the pruning points on the tree branches is a part of the modeling step. Both qualitative and quantitative evaluation are performed on a new challenging apple-trees dataset that is collected for the purpose of evaluating our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dormant pruning is one of the most costly and labor-intensive operations in specialty crop production. During winter, a large crew of trained seasonal workers has to carefully remove the branches from hundreds of trees using a set of pre-defined rules. The goal of automatic pruning is to reduce this dependence on a large workforce that is currently needed for the job. Automatically applying the pruning “rules” entails construction of 3D models of the trees in their dormant condition (that is, without foliage) and accurate estimation of the pruning points on the branches. This paper investigates the use of Skeleton-based Geometric (SbG) features in a 3D reconstruction scheme. The results obtained demonstrate the effectiveness of the SbG features for automatic reconstruction using only two views - the front and the back. Our results show that our proposed scheme locates the pruning points on the tree branches with an accuracy of 96.0%. The algorithm that locates the pruning points is based on a new adaptive circle-based-layer-aware modeling scheme for the trunks and the primary branches “PBs” of the trees. Its three main steps are detection, segmentation, and modeling. Localization of the pruning points on the tree branches is a part of the modeling step. Both qualitative and quantitative evaluation are performed on a new challenging apple-trees dataset that is collected for the purpose of evaluating our approach.",
"fno": "07301298",
"keywords": [
"Computational Geometry",
"Crops",
"Feature Extraction",
"Image Reconstruction",
"Image Segmentation",
"Dormant Pruning Automation",
"Adaptive Framework",
"Automatic Apple Tree Reconstruction",
"Automatic Apple Tree Modeling",
"Specialty Crop Production",
"Seasonal Workers",
"Pruning Rules",
"3 D Model Construction",
"Dormant Condition",
"Pruning Point Estimation",
"Skeleton Based Geometric Features",
"3 D Reconstruction Scheme",
"Sb G Features",
"Automatic Reconstruction",
"Front Views",
"Back Views",
"Tree Branches",
"Adaptive Circle Based Layer Aware Modeling Scheme",
"Tree Trunks",
"Primary Branches",
"PB",
"Detection Step",
"Segmentation Step",
"Modeling Step",
"Qualitative Evaluation",
"Quantitative Evaluation",
"Apple Tree Dataset",
"Three Dimensional Displays",
"Feature Extraction",
"Vegetation",
"Sensors",
"Image Reconstruction",
"Skeleton",
"Adaptation Models"
],
"authors": [
{
"affiliation": "Purdue University School of Electrical and Computer Engineering, Electrical Engineering Building, 475 Northwestern Ave, West Lafayette, IN 47907, United States",
"fullName": "Noha M. Elfiky",
"givenName": "Noha M.",
"surname": "Elfiky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University School of Electrical and Computer Engineering, Electrical Engineering Building, 475 Northwestern Ave, West Lafayette, IN 47907, United States",
"fullName": "Shayan A. Akbar",
"givenName": "Shayan A.",
"surname": "Akbar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University School of Electrical and Computer Engineering, Electrical Engineering Building, 475 Northwestern Ave, West Lafayette, IN 47907, United States",
"fullName": "Jianxin Sun",
"givenName": "Jianxin",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University School of Electrical and Computer Engineering, Electrical Engineering Building, 475 Northwestern Ave, West Lafayette, IN 47907, United States",
"fullName": "Johnny Park",
"givenName": "Johnny",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University School of Electrical and Computer Engineering, Electrical Engineering Building, 475 Northwestern Ave, West Lafayette, IN 47907, United States",
"fullName": "Avinash Kak",
"givenName": "Avinash",
"surname": "Kak",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-06-01T00:00:00",
"pubType": "proceedings",
"pages": "65-73",
"year": "2015",
"issn": "2160-7508",
"isbn": "978-1-4673-6759-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07301297",
"articleId": "12OmNC3XhyF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07301299",
"articleId": "12OmNwMXnmT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2016/1437/0/1437a347",
"title": "A Novel Benchmark RGBD Dataset for Dormant Apple Trees and Its Application to Automatic Pruning",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a347/12OmNAu1FlE",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a338",
"title": "A Novel Visualization Tool for Evaluating the Accuracy of 3D Sensing and Reconstruction Algorithms for Automatic Dormant Pruning Applications",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a338/12OmNBOllg4",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/3/73103656",
"title": "Tree pruning strategy in automated detection of coronary trees in cineangiograms",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73103656/12OmNBpEeL7",
"parentPublication": {
"id": "proceedings/icip/1995/7310/3",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifcsta/2009/3930/1/3930a030",
"title": "Hybrid Pruning Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ifcsta/2009/3930a030/12OmNyqzM58",
"parentPublication": {
"id": "proceedings/ifcsta/2009/3930/3",
"title": "Computer Science-Technology and Applications, International Forum on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c269",
"title": "Pruning the 3D Curve Skeleton",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c269/12OmNzICEBg",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2007/2874/3/28740213",
"title": "RST in Decision Tree Pruning",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2007/28740213/12OmNzTYC9e",
"parentPublication": {
"id": "proceedings/fskd/2007/2874/3",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477596",
"title": "Measuring and modeling apple trees using time-of-flight data for automation of dormant pruning applications",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477596/12OmNzw8jeL",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2014/08/06574839",
"title": "Pruning Incremental Linear Model Trees with Approximate Lookahead",
"doi": null,
"abstractUrl": "/journal/tk/2014/08/06574839/13rRUIJuxpW",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06784056",
"title": "Data-Driven Synthetic Modeling of Trees",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06784056/13rRUx0xPIK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/03/i0449",
"title": "Skeleton Pruning by Contour Partitioning with Discrete Curve Evolution",
"doi": null,
"abstractUrl": "/journal/tp/2007/03/i0449/13rRUxDqS57",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCbCrVT",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyen1n1",
"doi": "10.1109/CVPR.2014.87",
"title": "Data-Driven Flower Petal Modeling with Botany Priors",
"normalizedTitle": "Data-Driven Flower Petal Modeling with Botany Priors",
"abstract": "In this paper we focus on the 3D modeling of flower, in particular the petals. The complex structure, severe occlusions, and wide variations make the reconstruction of their 3D models a challenging task. Therefore, even though the flower is the most distinctive part of a plant, there has been little modeling study devoted to it. We overcome these challenges by combining data driven modeling techniques with domain knowledge from botany. Taking a 3D point cloud of an input flower scanned from a single view, our method starts with a level-set based segmentation of each individual petal, using both appearance and 3D information. Each segmented petal is then fitted with a scale-invariant morphable petal shape model, which is constructed from individually scanned exemplar petals. Novel constraints based on botany studies, such as the number and spatial layout of petals, are incorporated into the fitting process for realistically reconstructing occluded regions and maintaining correct 3D spatial relations. Finally, the reconstructed petal shape is texture mapped using the registered color images, with occluded regions filled in by content from visible ones. Experiments show that our approach can obtain realistic modeling of flowers even with severe occlusions and large shape/size variations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we focus on the 3D modeling of flower, in particular the petals. The complex structure, severe occlusions, and wide variations make the reconstruction of their 3D models a challenging task. Therefore, even though the flower is the most distinctive part of a plant, there has been little modeling study devoted to it. We overcome these challenges by combining data driven modeling techniques with domain knowledge from botany. Taking a 3D point cloud of an input flower scanned from a single view, our method starts with a level-set based segmentation of each individual petal, using both appearance and 3D information. Each segmented petal is then fitted with a scale-invariant morphable petal shape model, which is constructed from individually scanned exemplar petals. Novel constraints based on botany studies, such as the number and spatial layout of petals, are incorporated into the fitting process for realistically reconstructing occluded regions and maintaining correct 3D spatial relations. Finally, the reconstructed petal shape is texture mapped using the registered color images, with occluded regions filled in by content from visible ones. Experiments show that our approach can obtain realistic modeling of flowers even with severe occlusions and large shape/size variations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we focus on the 3D modeling of flower, in particular the petals. The complex structure, severe occlusions, and wide variations make the reconstruction of their 3D models a challenging task. Therefore, even though the flower is the most distinctive part of a plant, there has been little modeling study devoted to it. We overcome these challenges by combining data driven modeling techniques with domain knowledge from botany. Taking a 3D point cloud of an input flower scanned from a single view, our method starts with a level-set based segmentation of each individual petal, using both appearance and 3D information. Each segmented petal is then fitted with a scale-invariant morphable petal shape model, which is constructed from individually scanned exemplar petals. Novel constraints based on botany studies, such as the number and spatial layout of petals, are incorporated into the fitting process for realistically reconstructing occluded regions and maintaining correct 3D spatial relations. Finally, the reconstructed petal shape is texture mapped using the registered color images, with occluded regions filled in by content from visible ones. Experiments show that our approach can obtain realistic modeling of flowers even with severe occlusions and large shape/size variations.",
"fno": "5118a636",
"keywords": [
"Shape",
"Three Dimensional Displays",
"Image Reconstruction",
"Solid Modeling",
"Image Segmentation",
"Layout",
"Data Models"
],
"authors": [
{
"affiliation": null,
"fullName": "Chenxi Zhang",
"givenName": "Chenxi",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mao Ye",
"givenName": "Mao",
"surname": "Ye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bo Fu",
"givenName": "Bo",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ruigang Yang",
"givenName": "Ruigang",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-06-01T00:00:00",
"pubType": "proceedings",
"pages": "636-643",
"year": "2014",
"issn": "1063-6919",
"isbn": "978-1-4799-5118-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5118a628",
"articleId": "12OmNrJ11yx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5118a644",
"articleId": "12OmNzXFoH6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icisce/2017/3013/0/3013a001",
"title": "3D Reconstruction of Orchid Based on Virtual Binocular Vision Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013a001/12OmNBQ2VXt",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2016/2303/0/2303a025",
"title": "Detail-Preserving 3D Shape Modeling from Raw Volumetric Dataset via Hessian-Constrained Local Implicit Surfaces Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a025/12OmNCbU38Z",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/2/3494b216",
"title": "A Flower Growth Simulation based on Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b216/12OmNvTTceb",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457507",
"title": "Large-scale urban environment modeling from videos using image content segmentation and alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457507/12OmNvk7JL0",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206535",
"title": "Piecewise planar city 3D modeling from street view panoramic sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206535/12OmNwcCISc",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06474997",
"title": "Image segmentation for large-scale subcategory flower recognition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06474997/12OmNybfqVj",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/08531721",
"title": "Data-Driven Indoor Scene Modeling from a Single Color Image with Iterative Object Segmentation and Model Retrieval",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/08531721/17D45Xq6dDe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09837104",
"title": "Modeling of the 3D Tree Skeleton using Real-World Data: A Survey",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09837104/1FbOBmxWmRi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874256",
"title": "Efficient Flower Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a012",
"title": "Flower Factory: A Component-based Approach for Rapid Flower Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a012/1pysvDRGQq4",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwKoZd0",
"title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications",
"acronym": "trustcom",
"groupId": "1800729",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzZmZoe",
"doi": "10.1109/TrustCom.2011.201",
"title": "Modeling Trees with Rugged Surfaces",
"normalizedTitle": "Modeling Trees with Rugged Surfaces",
"abstract": "In this study, a method for modeling trees with rugged surfaces by simulating tree growth is proposed. The phenomena of cell division is considered for simulating tree growth. There are two types of cells that affect the growth of trees, namely, the apical meristem cells and the cambium cells. The former cells lie at the apex of a branch and are responsible for the extension of the branch. The latter cells cover the surface of the tree and are responsible for its lateral growth. Further, knots are generated by unusual and uneven growth of the cambium cells. To simulate these phenomena, a tree is modeled as a polygon mesh which grows by displacing the vertices of the mesh. Each vertex acts as an apical meristem cell or a cambium cell. The tree growth is defined by an L-System. Subsequently, tree models with rugged surfaces, such as those including knots, are generated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this study, a method for modeling trees with rugged surfaces by simulating tree growth is proposed. The phenomena of cell division is considered for simulating tree growth. There are two types of cells that affect the growth of trees, namely, the apical meristem cells and the cambium cells. The former cells lie at the apex of a branch and are responsible for the extension of the branch. The latter cells cover the surface of the tree and are responsible for its lateral growth. Further, knots are generated by unusual and uneven growth of the cambium cells. To simulate these phenomena, a tree is modeled as a polygon mesh which grows by displacing the vertices of the mesh. Each vertex acts as an apical meristem cell or a cambium cell. The tree growth is defined by an L-System. Subsequently, tree models with rugged surfaces, such as those including knots, are generated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this study, a method for modeling trees with rugged surfaces by simulating tree growth is proposed. The phenomena of cell division is considered for simulating tree growth. There are two types of cells that affect the growth of trees, namely, the apical meristem cells and the cambium cells. The former cells lie at the apex of a branch and are responsible for the extension of the branch. The latter cells cover the surface of the tree and are responsible for its lateral growth. Further, knots are generated by unusual and uneven growth of the cambium cells. To simulate these phenomena, a tree is modeled as a polygon mesh which grows by displacing the vertices of the mesh. Each vertex acts as an apical meristem cell or a cambium cell. The tree growth is defined by an L-System. Subsequently, tree models with rugged surfaces, such as those including knots, are generated.",
"fno": "06120997",
"keywords": [
"Biology Computing",
"Botany",
"Computer Aided Analysis",
"Mesh Generation",
"Vegetation",
"Tree Modeling",
"Tree Growth Simulation",
"Cell Division",
"Apical Meristem Cells",
"Cambium Cells",
"Polygon Mesh",
"L System",
"Computer Generated Image",
"Vegetation",
"Vectors",
"Mathematical Model",
"Nonhomogeneous Media",
"Shape",
"Equations",
"Merging",
"Computer Graphics",
"Natural Phenomena",
"Trees Growth Model"
],
"authors": [
{
"affiliation": null,
"fullName": "Atsushi Mizoguchi",
"givenName": "Atsushi",
"surname": "Mizoguchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kazunori Miyata",
"givenName": "Kazunori",
"surname": "Miyata",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "trustcom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1464-1471",
"year": "2011",
"issn": "2324-898X",
"isbn": "978-1-4577-2135-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06120996",
"articleId": "12OmNqyDjtt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06120998",
"articleId": "12OmNCga1SI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2011/1799/0/06120469",
"title": "Cell Resolution 3D Reconstruction of Developing Multilayer Tissues from Sparsely Sampled Volumetric Microscopy Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120469/12OmNBEpnA5",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pci/2012/4825/0/4825a138",
"title": "The Deletion Operation in xBR-Trees",
"doi": null,
"abstractUrl": "/proceedings-article/pci/2012/4825a138/12OmNBKW9D0",
"parentPublication": {
"id": "proceedings/pci/2012/4825/0",
"title": "2012 16th Panhellenic Conference on Informatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019440",
"title": "Scene text detection based on pruning strategy of MSER-trees and Linkage-trees",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019440/12OmNyKrH2M",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2014/08/06495455",
"title": "An Approximation Algorithm for Constructing Degree-Dependent Node-Weighted Multicast Trees",
"doi": null,
"abstractUrl": "/journal/td/2014/08/06495455/13rRUEgs2Bz",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/02/06777276",
"title": "Pitman Yor Diffusion Trees for Bayesian Hierarchical Clustering",
"doi": null,
"abstractUrl": "/journal/tp/2015/02/06777276/13rRUEgs2Nc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2015/06/06964811",
"title": "Evolutionary Bayesian Rose Trees",
"doi": null,
"abstractUrl": "/journal/tk/2015/06/06964811/13rRUwbaqVi",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06784056",
"title": "Data-Driven Synthetic Modeling of Trees",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06784056/13rRUx0xPIK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/04/06905854",
"title": "A Scalable and Accurate Descriptor for Dynamic Textures Using Bag of System Trees",
"doi": null,
"abstractUrl": "/journal/tp/2015/04/06905854/13rRUxjQyd4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/5555/01/09684987",
"title": "Best Match Graphs with Binary Trees",
"doi": null,
"abstractUrl": "/journal/tb/5555/01/09684987/1Ai9qCqtebC",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212896",
"title": "Interactive Modeling of Trees Using VR Devices",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212896/1nHRRssduko",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jPb3itzOTK",
"title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)",
"acronym": "icict",
"groupId": "1825584",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jPb7TIJWXm",
"doi": "10.1109/ICICT50521.2020.00022",
"title": "Simulate Forest Trees by Integrating L-System and 3D CAD Files",
"normalizedTitle": "Simulate Forest Trees by Integrating L-System and 3D CAD Files",
"abstract": "In this article, we propose a new approach for simu-lating trees, including their branches, sub-branches, and leaves. This approach combines the theory of biological development, mathematical models, and computer graphics, producing simu-lated trees and forest with full geometry. Specifically, we adopt the Lindenmayer process to simulate the branching pattern of trees and modify the available measurements and dimensions of 3D CAD developed object files to create natural looking sub-branches and leaves. Randomization has been added to the placement of all branches, sub branches and leaves. To simulate a forest, we adopt Inhomogeneous Poisson process to generate random locations of trees. Our approach can be used to create complex structured 3D virtual environment for the purpose of testing new sensors and training robotic algorithms. We look forward to applying this approach to test biosonar sensors that mimick bats' fly in the simulated environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this article, we propose a new approach for simu-lating trees, including their branches, sub-branches, and leaves. This approach combines the theory of biological development, mathematical models, and computer graphics, producing simu-lated trees and forest with full geometry. Specifically, we adopt the Lindenmayer process to simulate the branching pattern of trees and modify the available measurements and dimensions of 3D CAD developed object files to create natural looking sub-branches and leaves. Randomization has been added to the placement of all branches, sub branches and leaves. To simulate a forest, we adopt Inhomogeneous Poisson process to generate random locations of trees. Our approach can be used to create complex structured 3D virtual environment for the purpose of testing new sensors and training robotic algorithms. We look forward to applying this approach to test biosonar sensors that mimick bats' fly in the simulated environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this article, we propose a new approach for simu-lating trees, including their branches, sub-branches, and leaves. This approach combines the theory of biological development, mathematical models, and computer graphics, producing simu-lated trees and forest with full geometry. Specifically, we adopt the Lindenmayer process to simulate the branching pattern of trees and modify the available measurements and dimensions of 3D CAD developed object files to create natural looking sub-branches and leaves. Randomization has been added to the placement of all branches, sub branches and leaves. To simulate a forest, we adopt Inhomogeneous Poisson process to generate random locations of trees. Our approach can be used to create complex structured 3D virtual environment for the purpose of testing new sensors and training robotic algorithms. We look forward to applying this approach to test biosonar sensors that mimick bats' fly in the simulated environment.",
"fno": "728300a091",
"keywords": [
"CAD",
"Production Engineering Computing",
"Stochastic Processes",
"Virtual Reality",
"3 D CAD Files",
"Biological Development",
"Mathematical Models",
"Computer Graphics",
"Lindenmayer Process",
"Object Files",
"Inhomogeneous Poisson Process",
"Complex Structured 3 D Virtual Environment",
"Simulated Environment",
"Simulated Forest Trees",
"Solid Modeling",
"Vegetation",
"Three Dimensional Displays",
"Computational Modeling",
"Forestry",
"Visualization",
"Geometry",
"L Systems",
"CAD",
"Inhomogeneous Poisson Processes IP Ps",
"Simulated Trees"
],
"authors": [
{
"affiliation": "Dept. of Statistics Virginia Tech, Blacksburg, USA",
"fullName": "M. Hassan Tanveer",
"givenName": "M. Hassan",
"surname": "Tanveer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DIBRIS University of Genova ,Genova, Italy",
"fullName": "Antony Thomas",
"givenName": "Antony",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Statistics, Virginia Tech, Blacksburg, USA",
"fullName": "Xiaowei Wu",
"givenName": "Xiaowei",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Statistics Virginia Tech, Blacksburg, USA",
"fullName": "Hongxiao Zhu",
"givenName": "Hongxiao",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icict",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "91-95",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7283-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "728300a086",
"articleId": "1jPb7JTjaGQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "728300a096",
"articleId": "1jPb53JIU5q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isvri/2011/0054/0/05759637",
"title": "Realistic real-time rendering for large-scale forest scenes",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759637/12OmNqH9hks",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2012/4771/0/4771a087",
"title": "Visualizing the Evolution of Software Systems Using the Forest Metaphor",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2012/4771a087/12OmNzEmFFO",
"parentPublication": {
"id": "proceedings/iv/2012/4771/0",
"title": "2012 16th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477596",
"title": "Measuring and modeling apple trees using time-of-flight data for automation of dormant pruning applications",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477596/12OmNzw8jeL",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/12/07368927",
"title": "Tree Modeling with Real Tree-Parts Examples",
"doi": null,
"abstractUrl": "/journal/tg/2016/12/07368927/13rRUxlgxTp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/12/07775115",
"title": "Creative Virtual Tree Modeling Through Hierarchical Topology-Preserving Blending",
"doi": null,
"abstractUrl": "/journal/tg/2017/12/07775115/13rRUyYSWl4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/05/07836345",
"title": "Real-Time Interactive Tree Animation",
"doi": null,
"abstractUrl": "/journal/tg/2018/05/07836345/13rRUyoPSP9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2019/06/08409324",
"title": "Introducing Cuts Into a Top-Down Process for Checking Tree Inclusion",
"doi": null,
"abstractUrl": "/journal/tk/2019/06/08409324/13rRUypp58b",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2021/0679/0/067900a055",
"title": "3D Forest-tree Modeling Approach Based on Loading Segment Models",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2021/067900a055/1CATsJrErhC",
"parentPublication": {
"id": "proceedings/itme/2021/0679/0",
"title": "2021 11th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdacai/2022/5470/0/547000a279",
"title": "Sustainable forest management: Decision model based on simulation of forest carbon sequestration considering forest products effect",
"doi": null,
"abstractUrl": "/proceedings-article/icdacai/2022/547000a279/1J7WRAgJhTi",
"parentPublication": {
"id": "proceedings/icdacai/2022/5470/0",
"title": "2022 International Conference on Data Analytics, Computing and Artificial Intelligence (ICDACAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bracis/2019/4253/0/425300a777",
"title": "Visual Approach to Support Analysis of Optimum-Path Forest Classifier",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2019/425300a777/1fHkFuvyNMY",
"parentPublication": {
"id": "proceedings/bracis/2019/4253/0",
"title": "2019 8th Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx6g6nR",
"title": "2015 International Conference on Computer Science and Applications (CSA)",
"acronym": "csa",
"groupId": "1803775",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzyp5YU",
"doi": "10.1109/CSA.2015.53",
"title": "Isogeometric Analysis: The Influence of Penalty Coefficients in Boundary Condition Treatments",
"normalizedTitle": "Isogeometric Analysis: The Influence of Penalty Coefficients in Boundary Condition Treatments",
"abstract": "The NURBS-based isogeometric analysis (IGA) has been applied to wide classes of engineering problems due to its accurate geometric description. One of the major concerns with IGA is finding an efficient way to treat boundary conditions. Many studies show that the results can maintain accuracy, where the boundary conditions are enforced by the penalty method. However, the inappropriate selection of penalty coefficients may lead to significant errors. Consequently, a suggestion of penalty coefficients with different degrees and control points of IGA is given in this work through some representative numerical examples from the point of the improvement in accuracy, robustness and rate of convergence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The NURBS-based isogeometric analysis (IGA) has been applied to wide classes of engineering problems due to its accurate geometric description. One of the major concerns with IGA is finding an efficient way to treat boundary conditions. Many studies show that the results can maintain accuracy, where the boundary conditions are enforced by the penalty method. However, the inappropriate selection of penalty coefficients may lead to significant errors. Consequently, a suggestion of penalty coefficients with different degrees and control points of IGA is given in this work through some representative numerical examples from the point of the improvement in accuracy, robustness and rate of convergence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The NURBS-based isogeometric analysis (IGA) has been applied to wide classes of engineering problems due to its accurate geometric description. One of the major concerns with IGA is finding an efficient way to treat boundary conditions. Many studies show that the results can maintain accuracy, where the boundary conditions are enforced by the penalty method. However, the inappropriate selection of penalty coefficients may lead to significant errors. Consequently, a suggestion of penalty coefficients with different degrees and control points of IGA is given in this work through some representative numerical examples from the point of the improvement in accuracy, robustness and rate of convergence.",
"fno": "9961a213",
"keywords": [
"Splines Mathematics",
"Surface Topography",
"Surface Reconstruction",
"Stress",
"Boundary Conditions",
"Convergence",
"Penalty Coefficients",
"Isogeometric Analysis",
"NURBS",
"The Penalty Method",
"Boundary Conditions"
],
"authors": [
{
"affiliation": null,
"fullName": "Feng Chang",
"givenName": "Feng",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wei-Qiang Wang",
"givenName": "Wei-Qiang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yan Liu",
"givenName": "Yan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yan-Peng Qu",
"givenName": "Yan-Peng",
"surname": "Qu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "213-217",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-9961-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9961a208",
"articleId": "12OmNy50ghi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9961a218",
"articleId": "12OmNyiUBpS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icisce/2017/3013/0/3013a802",
"title": "A Method for Calculation of Hydrodynamic Coefficients Based on NURBS",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013a802/12OmNBzRNpF",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815006",
"title": "Isogeometric Analysis Based on a Set of Truncated Interpolatory Basis Functions",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815006/12OmNvAiSa4",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a080",
"title": "Research on a New Linear Interpolation Algorithm of NURBS Curve",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a080/12OmNxwWoum",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06846294",
"title": "Direct Isosurface Ray Casting of NURBS-Based Isogeometric Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06846294/13rRUwvT9gu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2016/3593/0/07982321",
"title": "Data Structure for Supporting Patch Refinement in Adaptive Isogeometric Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2016/07982321/17D45WLdYRa",
"parentPublication": {
"id": "proceedings/cse-euc/2016/3593/0",
"title": "2016 19th IEEE Intl Conference on Computational Science and Engineering (CSE), IEEE 14th Intl Conference on Embedded and Ubiquitous Computing (EUC), and 15th Intl Symposium on Distributed Computing and Applications for Business Engineering (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000a073",
"title": "A Note on the Convergence of NURBS Curves When Weights Approach Infinity",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000a073/17D45WXIkzJ",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2019/05/08523633",
"title": "Parallel Refined Isogeometric Analysis in 3D",
"doi": null,
"abstractUrl": "/journal/td/2019/05/08523633/17D45XreC6n",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2018/8481/0/848100a097",
"title": "Linear Motor Platform Contouring Control Based on NURBS Curve Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2018/848100a097/17D45XwUAKx",
"parentPublication": {
"id": "proceedings/icmcce/2018/8481/0",
"title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a061",
"title": "Point Cloud Hole Filling Based on Feature Lines Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a061/1ap5xjYpSZa",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscipt/2021/4137/0/413700a679",
"title": "Mechanical Simulation of Ankle joint based on Isogeometric Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iscipt/2021/413700a679/1zzpK2KDv0s",
"parentPublication": {
"id": "proceedings/iscipt/2021/4137/0",
"title": "2021 6th International Symposium on Computer and Information Processing Technology (ISCIPT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxvO06W",
"title": "2009 10th International Conference on Document Analysis and Recognition",
"acronym": "icdar",
"groupId": "1000219",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNANTArL",
"doi": "10.1109/ICDAR.2009.274",
"title": "A Method for Automatically Extracting Road Layers from Raster Maps",
"normalizedTitle": "A Method for Automatically Extracting Road Layers from Raster Maps",
"abstract": "To exploit the road network in raster maps, the first step is to extract the pixels that constitute the roads and then vectorize the road pixels. Identifying colors that represent roads in raster maps for extracting road pixels is difficult since raster maps often contain numerous colors due to the noise introduced during the processes of image compression and scanning. In this paper, we present an approach that minimizes the required user input for identifying the road colors representing the road network in a raster map. We can then use the identified road colors to extract road pixels from the map. Our approach can be used on scanned and compressed maps that are otherwise difficult to process automatically and tedious to process manually. We tested our approach with 100 maps from a variety of sources, which include 90 scanned maps with various compression levels and 10 computer generated maps. We successfully identified the road colors and extracted the road pixels from all test maps with fewer than four user labels per map on average.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To exploit the road network in raster maps, the first step is to extract the pixels that constitute the roads and then vectorize the road pixels. Identifying colors that represent roads in raster maps for extracting road pixels is difficult since raster maps often contain numerous colors due to the noise introduced during the processes of image compression and scanning. In this paper, we present an approach that minimizes the required user input for identifying the road colors representing the road network in a raster map. We can then use the identified road colors to extract road pixels from the map. Our approach can be used on scanned and compressed maps that are otherwise difficult to process automatically and tedious to process manually. We tested our approach with 100 maps from a variety of sources, which include 90 scanned maps with various compression levels and 10 computer generated maps. We successfully identified the road colors and extracted the road pixels from all test maps with fewer than four user labels per map on average.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To exploit the road network in raster maps, the first step is to extract the pixels that constitute the roads and then vectorize the road pixels. Identifying colors that represent roads in raster maps for extracting road pixels is difficult since raster maps often contain numerous colors due to the noise introduced during the processes of image compression and scanning. In this paper, we present an approach that minimizes the required user input for identifying the road colors representing the road network in a raster map. We can then use the identified road colors to extract road pixels from the map. Our approach can be used on scanned and compressed maps that are otherwise difficult to process automatically and tedious to process manually. We tested our approach with 100 maps from a variety of sources, which include 90 scanned maps with various compression levels and 10 computer generated maps. We successfully identified the road colors and extracted the road pixels from all test maps with fewer than four user labels per map on average.",
"fno": "3725a838",
"keywords": [
"Raster Map",
"Road",
"Hough Transformation",
"Vectorization"
],
"authors": [
{
"affiliation": null,
"fullName": "Yao-Yi Chiang",
"givenName": "Yao-Yi",
"surname": "Chiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Craig A. Knoblock",
"givenName": "Craig A.",
"surname": "Knoblock",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-07-01T00:00:00",
"pubType": "proceedings",
"pages": "838-842",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3725-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3725a833",
"articleId": "12OmNCcKQqS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3725a843",
"articleId": "12OmNBIFmtf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2010/4109/0/4109d199",
"title": "An Approach for Recognizing Text Labels in Raster Maps",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d199/12OmNAtK4nN",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmit/2008/3556/0/3556a381",
"title": "Road Extraction from Color Raster Urban Triffic Map",
"doi": null,
"abstractUrl": "/proceedings-article/mmit/2008/3556a381/12OmNB8CiYM",
"parentPublication": {
"id": "proceedings/mmit/2008/3556/0",
"title": "MultiMedia and Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/2/252121034",
"title": "Classification of Line and Character Pixels on Raster Maps Using Discrete Cosine Transformation Coefficients and Support Vector Machine",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252121034/12OmNBV9IgD",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iptc/2010/4196/0/4196a188",
"title": "Spatial Interpolation Method of Scalar Data Based on Raster Distance Transformation of Map Algebra",
"doi": null,
"abstractUrl": "/proceedings-article/iptc/2010/4196a188/12OmNrH1PCh",
"parentPublication": {
"id": "proceedings/iptc/2010/4196/0",
"title": "Intelligence Information Processing and Trusted Computing, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ettandgrs/2008/3563/2/3563b303",
"title": "Raster Image Districting and Its Application to Geoinformation",
"doi": null,
"abstractUrl": "/proceedings-article/ettandgrs/2008/3563b303/12OmNvTjZV6",
"parentPublication": {
"id": "ettandgrs/2008/3563/2",
"title": "Education Technology and Training & Geoscience and Remote Sensing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmit/2008/3556/0/3556a385",
"title": "Study on Extracting the Colors of Road Network Based on the Highway Traffic Map Images",
"doi": null,
"abstractUrl": "/proceedings-article/mmit/2008/3556a385/12OmNxWLTEW",
"parentPublication": {
"id": "proceedings/mmit/2008/3556/0",
"title": "MultiMedia and Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1996/7436/0/74360497",
"title": "A method to fuse two kinds of digital road maps",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1996/74360497/12OmNxwENwT",
"parentPublication": {
"id": "proceedings/icmcs/1996/7436/0",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2009/3725/0/3725a376",
"title": "Raster Map Image Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2009/3725a376/12OmNz6iOF6",
"parentPublication": {
"id": "proceedings/icdar/2009/3725/0",
"title": "2009 10th International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2015/9642/0/9642a127",
"title": "An Algorithm for Triangulations of Terrain Maps Represented by Homogeneous Raster Data",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2015/9642a127/12OmNzVoBPd",
"parentPublication": {
"id": "proceedings/acit-csi/2015/9642/0",
"title": "2015 3rd International Conference on Applied Computing and Information Technology/2nd International Conference on Computational Science and Intelligence (ACIT-CSI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icapr/2009/3520/0/3520a318",
"title": "Raster-to-Vector Conversion: Problems and Tools Towards a Solution ",
"doi": null,
"abstractUrl": "/proceedings-article/icapr/2009/3520a318/12OmNzlly5l",
"parentPublication": {
"id": "proceedings/icapr/2009/3520/0",
"title": "Advances in Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzEVRU8",
"title": "2011 International Conference on Future Computer Science and Education",
"acronym": "icfcse",
"groupId": "1800528",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBAqZHA",
"doi": "10.1109/ICFCSE.2011.27",
"title": "Research on the Silk Road Tourism Development from the Perspective of Tourist Destination",
"normalizedTitle": "Research on the Silk Road Tourism Development from the Perspective of Tourist Destination",
"abstract": "After the Trade Road, Culture Gallery and Traffic Road, the Silk Road will be a bright tourist route in the world map in the 21st century. At present, the Silk Road Tourism has started, however, there are a series of problems, such as weak awareness, inconvenient traffic, inadequate investment, repetitious construction, disorder management and competition. Blessed with beautiful natural scene, long history, profound culture and rich ethnic flavor, it will surely become a global top tourist destination as long as the market orientation is proper, government takes the leading role and enterprises actively participate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "After the Trade Road, Culture Gallery and Traffic Road, the Silk Road will be a bright tourist route in the world map in the 21st century. At present, the Silk Road Tourism has started, however, there are a series of problems, such as weak awareness, inconvenient traffic, inadequate investment, repetitious construction, disorder management and competition. Blessed with beautiful natural scene, long history, profound culture and rich ethnic flavor, it will surely become a global top tourist destination as long as the market orientation is proper, government takes the leading role and enterprises actively participate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "After the Trade Road, Culture Gallery and Traffic Road, the Silk Road will be a bright tourist route in the world map in the 21st century. At present, the Silk Road Tourism has started, however, there are a series of problems, such as weak awareness, inconvenient traffic, inadequate investment, repetitious construction, disorder management and competition. Blessed with beautiful natural scene, long history, profound culture and rich ethnic flavor, it will surely become a global top tourist destination as long as the market orientation is proper, government takes the leading role and enterprises actively participate.",
"fno": "06041663",
"keywords": [
"Roads",
"Industries",
"Government",
"Investments",
"Economics",
"Cities And Towns",
"Development Trend",
"Tourism",
"Silk Road"
],
"authors": [
{
"affiliation": null,
"fullName": "Haojie Sun",
"givenName": "Haojie",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu Dong",
"givenName": "Yu",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yong Li",
"givenName": "Yong",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xuegang Chen",
"givenName": "Xuegang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icfcse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-08-01T00:00:00",
"pubType": "proceedings",
"pages": "75-79",
"year": "2011",
"issn": null,
"isbn": "978-1-4577-1562-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06041662",
"articleId": "12OmNz5JC1G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06041664",
"articleId": "12OmNyfdOY0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icee/2010/3997/0/3997f570",
"title": "The Discussion for CI-CM Model Application in the Culture Creative Industry of Binhai New District, Tianjin",
"doi": null,
"abstractUrl": "/proceedings-article/icee/2010/3997f570/12OmNwE9OUp",
"parentPublication": {
"id": "proceedings/icee/2010/3997/0",
"title": "International Conference on E-Business and E-Government",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ettandgrs/2008/3563/2/3563b007",
"title": "The Effect of Nationality on the Multidimensionality of Tourist Destination Image: A Case of Hangzhou",
"doi": null,
"abstractUrl": "/proceedings-article/ettandgrs/2008/3563b007/12OmNx76TVJ",
"parentPublication": {
"id": "ettandgrs/2008/3563/2",
"title": "Education Technology and Training & Geoscience and Remote Sensing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsde/2010/3958/0/3958a301",
"title": "Discuss on the Researching and Using Geo-Spatial Technology In the History Changing of the Silk Road",
"doi": null,
"abstractUrl": "/proceedings-article/dsde/2010/3958a301/12OmNy4r3Yc",
"parentPublication": {
"id": "proceedings/dsde/2010/3958/0",
"title": "Data Storage and Data Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icscse/2017/1401/0/1401a100",
"title": "Retracted: The Silk Road Economic Belt-Urumqi Construction for Regional Financial Center Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icscse/2017/1401a100/12OmNyuPLiO",
"parentPublication": {
"id": "proceedings/icscse/2017/1401/0",
"title": "2017 International Conference on Smart City and Systems Engineering (ICSCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06774478",
"title": "Drawing Road Networks with Mental Maps",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06774478/13rRUwbs2b5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2021/1790/0/179000a076",
"title": "Design of China's Ancient Silk Road Display System Based on WebXR",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2021/179000a076/1BQitdWTpcI",
"parentPublication": {
"id": "proceedings/mlbdbi/2021/1790/0",
"title": "2021 3rd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2021/2186/0/218600a231",
"title": "Design and Implementation of China's Ancient Silk Road Platform Based on Human-Computer Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2021/218600a231/1Et4Hri1Ik0",
"parentPublication": {
"id": "proceedings/icaice/2021/2186/0",
"title": "2021 2nd International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccea/2020/5904/0/09103876",
"title": "Progress in the Construction of Marine Observation Data Sources on the Western Route of the Maritime Silk Road",
"doi": null,
"abstractUrl": "/proceedings-article/iccea/2020/09103876/1kesCtzetc4",
"parentPublication": {
"id": "proceedings/iccea/2020/5904/0",
"title": "2020 International Conference on Computer Engineering and Application (ICCEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbaie/2020/6499/0/09196312",
"title": "Hierarchical System of Dry Bulk Shipping Network in the \"21st Century Maritime Silk Road\"",
"doi": null,
"abstractUrl": "/proceedings-article/icbaie/2020/09196312/1n90Z0zKEqk",
"parentPublication": {
"id": "proceedings/icbaie/2020/6499/0",
"title": "2020 International Conference on Big Data, Artificial Intelligence and Internet of Things Engineering (ICBAIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdia/2020/2232/0/223200a451",
"title": "Establishment of wave climate datasets: Case study for the Maritime Silk Road",
"doi": null,
"abstractUrl": "/proceedings-article/bigdia/2020/223200a451/1stvzNdCUTe",
"parentPublication": {
"id": "proceedings/bigdia/2020/2232/0",
"title": "2020 6th International Conference on Big Data and Information Analytics (BigDIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNylboru",
"title": "2015 10th International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)",
"acronym": "bwcca",
"groupId": "1800183",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCctf9F",
"doi": "10.1109/BWCCA.2015.8",
"title": "An Analysis of Road Maps Based on Voronoi Diagram for Vehicular Broadcast",
"normalizedTitle": "An Analysis of Road Maps Based on Voronoi Diagram for Vehicular Broadcast",
"abstract": "In Vehicular Ad-hoc Networks, a method of message dissemination is broadcast. It is a method of message dissemination by relaying messages between vehicles. In broadcast, vehicles cause the broadcast storm problem because of redundant relays. Thus, it is necessary to control redundant relays by broadcast protocols. Many broadcast protocols have been developed and they can disseminate with suppressing redundant relays. However, the packet reception ratio and its overhead depend on not only broadcast protocols, but also structures of road maps. Thus, it is necessary to research some relationships between radio propagation features and structures of road maps. In this paper, we propose a method of analyzing complexities of road maps. It is named Analysis Road map Complexity by Deviation of intersection distribution (ARC-DID). It focus on that there is a relationship between radio propagation features and places of intersections. Hence, we propose an analysis method of complexities of road maps based on intersection distribution. In order to analyze complexities of road maps based on a deviation of intersections, we use a Voronoi diagram. According to this method, we can analyze complexities of road maps quantitatively. In order to show utility of complexities calculated by proposed method, we execute a broadcast simulation with three parameters. According to the simulation, complexities calculated by ARC-DID can set parameters which adapt to road maps.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In Vehicular Ad-hoc Networks, a method of message dissemination is broadcast. It is a method of message dissemination by relaying messages between vehicles. In broadcast, vehicles cause the broadcast storm problem because of redundant relays. Thus, it is necessary to control redundant relays by broadcast protocols. Many broadcast protocols have been developed and they can disseminate with suppressing redundant relays. However, the packet reception ratio and its overhead depend on not only broadcast protocols, but also structures of road maps. Thus, it is necessary to research some relationships between radio propagation features and structures of road maps. In this paper, we propose a method of analyzing complexities of road maps. It is named Analysis Road map Complexity by Deviation of intersection distribution (ARC-DID). It focus on that there is a relationship between radio propagation features and places of intersections. Hence, we propose an analysis method of complexities of road maps based on intersection distribution. In order to analyze complexities of road maps based on a deviation of intersections, we use a Voronoi diagram. According to this method, we can analyze complexities of road maps quantitatively. In order to show utility of complexities calculated by proposed method, we execute a broadcast simulation with three parameters. According to the simulation, complexities calculated by ARC-DID can set parameters which adapt to road maps.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In Vehicular Ad-hoc Networks, a method of message dissemination is broadcast. It is a method of message dissemination by relaying messages between vehicles. In broadcast, vehicles cause the broadcast storm problem because of redundant relays. Thus, it is necessary to control redundant relays by broadcast protocols. Many broadcast protocols have been developed and they can disseminate with suppressing redundant relays. However, the packet reception ratio and its overhead depend on not only broadcast protocols, but also structures of road maps. Thus, it is necessary to research some relationships between radio propagation features and structures of road maps. In this paper, we propose a method of analyzing complexities of road maps. It is named Analysis Road map Complexity by Deviation of intersection distribution (ARC-DID). It focus on that there is a relationship between radio propagation features and places of intersections. Hence, we propose an analysis method of complexities of road maps based on intersection distribution. In order to analyze complexities of road maps based on a deviation of intersections, we use a Voronoi diagram. According to this method, we can analyze complexities of road maps quantitatively. In order to show utility of complexities calculated by proposed method, we execute a broadcast simulation with three parameters. According to the simulation, complexities calculated by ARC-DID can set parameters which adapt to road maps.",
"fno": "8315a150",
"keywords": [
"Roads",
"Relays",
"Vehicles",
"Complexity Theory",
"Protocols",
"Radio Propagation",
"Radiation Detectors",
"Voronoi Diagram",
"VANE Ts",
"Broadcast",
"Complexity"
],
"authors": [
{
"affiliation": null,
"fullName": "Ryo Yanagida",
"givenName": "Ryo",
"surname": "Yanagida",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Keiji Obara",
"givenName": "Keiji",
"surname": "Obara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Koki Ogawa",
"givenName": "Koki",
"surname": "Ogawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hiroshi Shigeno",
"givenName": "Hiroshi",
"surname": "Shigeno",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bwcca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "150-156",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-8315-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8315a143",
"articleId": "12OmNAmVH4a",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8315a157",
"articleId": "12OmNwCsdzR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fit/2013/2503/0/2293a177",
"title": "Towards Improving Vehicular Communication in Modern Vehicular Environment",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2013/2293a177/12OmNAsBFMd",
"parentPublication": {
"id": "proceedings/fit/2013/2503/0",
"title": "2013 11th International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2011/661/0/05723168",
"title": "Road-based broadcast message dissemination approach in VANETs",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2011/05723168/12OmNBOllnC",
"parentPublication": {
"id": "proceedings/icoin/2011/661/0",
"title": "2011 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2016/2020/0/07498423",
"title": "Capacity-Constrained Network-Voronoi Diagram",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2016/07498423/12OmNBTJIyt",
"parentPublication": {
"id": "proceedings/icde/2016/2020/0",
"title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a088",
"title": "Geolocation for Printed Maps Using Line Segment-Based SIFT-like Feature Matching",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a088/12OmNqNXEoo",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2002/1435/9/14350298",
"title": "Multipoint Relaying for Flooding Broadcast Messages in Mobile Wireless Networks",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2002/14350298/12OmNvTBAZG",
"parentPublication": {
"id": "proceedings/hicss/2002/1435/9",
"title": "Proceedings of the 35th Annual Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2006/2630/0/26300186",
"title": "Voronoi Diagram Based Automated Skeleton Extraction from Colour Scanned Maps",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2006/26300186/12OmNzV70LQ",
"parentPublication": {
"id": "proceedings/isvd/2006/2630/0",
"title": "2006 3rd International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06774478",
"title": "Drawing Road Networks with Mental Maps",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06774478/13rRUwbs2b5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2015/11/07123646",
"title": "Capacity-Constrained Network-Voronoi Diagram",
"doi": null,
"abstractUrl": "/journal/tk/2015/11/07123646/13rRUwh80uW",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/03/09519523",
"title": "An Efficient Cooperative Transmission Based Opportunistic Broadcast Scheme in VANETs",
"doi": null,
"abstractUrl": "/journal/tm/2023/03/09519523/1wc8RfcX9n2",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/04/09566795",
"title": "A Fast, Reliable, Opportunistic Broadcast Scheme With Mitigation of Internal Interference in VANETs",
"doi": null,
"abstractUrl": "/journal/tm/2023/04/09566795/1xC6Ow4Bgha",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzUPpw6",
"title": "2011 IEEE 36th Conference on Local Computer Networks",
"acronym": "lcn",
"groupId": "1000419",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwxlrgo",
"doi": "10.1109/LCN.2011.6115522",
"title": "MapCorrect: Automatic correction and validation of road maps using public sensing",
"normalizedTitle": "MapCorrect: Automatic correction and validation of road maps using public sensing",
"abstract": "With the increasing proliferation of small and cheap GPS receivers, a new way of generating road maps could be witnessed over the last few years. Participatory mapping approaches like OpenStreetMap introduced a way to generate road maps collaboratively from scratch. Moreover, automatic mapping algorithms were proposed, which automatically infer road maps from a set of given GPS traces. Nevertheless, one of the main problems of these maps is their unknown quality in terms of accuracy, which makes them unreliable and, therefore, not applicable for the use in critical scenarios. To address this issue, we propose MapCorrect: An automatic map correction and validation system. MapCorrect automatically collects GPS traces from people's mobile devices to correct a given road map and validate it by identifying those parts of the map that are accurately mapped with respect to some user provided quality requirements. Since fixing a GPS position is a battery draining operation, the collection of GPS data raises concerns about the energy consumption of the participating mobile devices. We tackle this issue by introducing an optimized sensing mechanism that gives the mobile devices notifications indicating those parts of the map that are considered as sufficiently mapped and, therefore, require no further GPS data for their validation. Furthermore, we show by simulation that using this approach up to 50% of the mobile phones' energy can be saved while not impairing the effectiveness of the map correction and validation process at all.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the increasing proliferation of small and cheap GPS receivers, a new way of generating road maps could be witnessed over the last few years. Participatory mapping approaches like OpenStreetMap introduced a way to generate road maps collaboratively from scratch. Moreover, automatic mapping algorithms were proposed, which automatically infer road maps from a set of given GPS traces. Nevertheless, one of the main problems of these maps is their unknown quality in terms of accuracy, which makes them unreliable and, therefore, not applicable for the use in critical scenarios. To address this issue, we propose MapCorrect: An automatic map correction and validation system. MapCorrect automatically collects GPS traces from people's mobile devices to correct a given road map and validate it by identifying those parts of the map that are accurately mapped with respect to some user provided quality requirements. Since fixing a GPS position is a battery draining operation, the collection of GPS data raises concerns about the energy consumption of the participating mobile devices. We tackle this issue by introducing an optimized sensing mechanism that gives the mobile devices notifications indicating those parts of the map that are considered as sufficiently mapped and, therefore, require no further GPS data for their validation. Furthermore, we show by simulation that using this approach up to 50% of the mobile phones' energy can be saved while not impairing the effectiveness of the map correction and validation process at all.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the increasing proliferation of small and cheap GPS receivers, a new way of generating road maps could be witnessed over the last few years. Participatory mapping approaches like OpenStreetMap introduced a way to generate road maps collaboratively from scratch. Moreover, automatic mapping algorithms were proposed, which automatically infer road maps from a set of given GPS traces. Nevertheless, one of the main problems of these maps is their unknown quality in terms of accuracy, which makes them unreliable and, therefore, not applicable for the use in critical scenarios. To address this issue, we propose MapCorrect: An automatic map correction and validation system. MapCorrect automatically collects GPS traces from people's mobile devices to correct a given road map and validate it by identifying those parts of the map that are accurately mapped with respect to some user provided quality requirements. Since fixing a GPS position is a battery draining operation, the collection of GPS data raises concerns about the energy consumption of the participating mobile devices. We tackle this issue by introducing an optimized sensing mechanism that gives the mobile devices notifications indicating those parts of the map that are considered as sufficiently mapped and, therefore, require no further GPS data for their validation. Furthermore, we show by simulation that using this approach up to 50% of the mobile phones' energy can be saved while not impairing the effectiveness of the map correction and validation process at all.",
"fno": "06115522",
"keywords": [
"Global Positioning System",
"Mobile Handsets",
"Radio Receivers",
"Wireless Sensor Networks",
"Map Correct",
"Road Map Automatic Correction",
"Public Sensing",
"GPS Receivers",
"Open Street Map",
"Automatic Validation System",
"Mobile Devices",
"GPS Position",
"GPS Data",
"Energy Consumption",
"Mobile Phone",
"Roads",
"Global Positioning System",
"Mobile Handsets",
"Sensors",
"Accuracy",
"Servers",
"Mobile Communication",
"Wireless Sensor Networks",
"Mobile Computing",
"Energy Aware Systems"
],
"authors": [
{
"affiliation": "Institute of Parallel and Distributed Systems, Universität Stuttgart, 70569 Stuttgart, Germany",
"fullName": "Patrick Baier",
"givenName": "Patrick",
"surname": "Baier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Parallel and Distributed Systems, Universität Stuttgart, 70569 Stuttgart, Germany",
"fullName": "Harald Weinschrott",
"givenName": "Harald",
"surname": "Weinschrott",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Parallel and Distributed Systems, Universität Stuttgart, 70569 Stuttgart, Germany",
"fullName": "Frank Dürr",
"givenName": "Frank",
"surname": "Dürr",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Parallel and Distributed Systems, Universität Stuttgart, 70569 Stuttgart, Germany",
"fullName": "Kurt Rothermel",
"givenName": "Kurt",
"surname": "Rothermel",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "lcn",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-10-01T00:00:00",
"pubType": "proceedings",
"pages": "58-66",
"year": "2011",
"issn": "0742-1303",
"isbn": "978-1-61284-926-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06115513",
"articleId": "12OmNAtK4i4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06115534",
"articleId": "12OmNzahcdE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hpcc/2016/4297/0/07828526",
"title": "Registration of Low Cost Maps within Large Scale MMS Maps",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc/2016/07828526/12OmNApLGsF",
"parentPublication": {
"id": "proceedings/hpcc/2016/4297/0",
"title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2018/3649/0/364901a426",
"title": "A Map Matching Algorithm for Complex Road Conditions Based on Base Station Data",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2018/364901a426/12OmNApu5hN",
"parentPublication": {
"id": "proceedings/bigcomp/2018/3649/0",
"title": "2018 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2017/3932/0/07962459",
"title": "Topic Model-Based Road Network Inference from Massive Trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2017/07962459/12OmNwDACd5",
"parentPublication": {
"id": "proceedings/mdm/2017/3932/0",
"title": "2017 18th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2012/4779/0/4779a082",
"title": "Estimating the Number of Lanes on Rapid Road Map Survey System Using GPS Trajectories as Collective Intelligence",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2012/4779a082/12OmNxbEtIl",
"parentPublication": {
"id": "proceedings/nbis/2012/4779/0",
"title": "2012 15th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2016/0984/0/0984a376",
"title": "Skyline Query Processing System for Taiwan Maps",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2016/0984a376/12OmNyr8YzL",
"parentPublication": {
"id": "proceedings/imis/2016/0984/0",
"title": "2016 10th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06774478",
"title": "Drawing Road Networks with Mental Maps",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06774478/13rRUwbs2b5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2010/4236/0/05633020",
"title": "A Road Location Estimating Method by Many Trajectories to Realize Rapid Map Survey for a Car Navigation System",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2010/05633020/183rAfmup6p",
"parentPublication": {
"id": "proceedings/bwcca/2010/4236/0",
"title": "2010 International Conference on Broadband, Wireless Computing, Communication and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679461",
"title": "Road Segment Interpolation for Incomplete Road Data",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679461/18Xkis4mchy",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679294",
"title": "Calculation of Average Road Speed Based on Car-to-Car Messaging",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679294/18XklPRCUJW",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icict/2021/1399/0/139900a194",
"title": "Graph Representation of Road Network for Mobility-Impaired Persons",
"doi": null,
"abstractUrl": "/proceedings-article/icict/2021/139900a194/1vg8uU7rhni",
"parentPublication": {
"id": "proceedings/icict/2021/1399/0",
"title": "2021 4th International Conference on Information and Computer Technologies (ICICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrIrPwW",
"title": "Multimedia Computing and Systems, International Conference on",
"acronym": "icmcs",
"groupId": "1000479",
"volume": "0",
"displayVolume": "0",
"year": "1996",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxwENwT",
"doi": "10.1109/MMCS.1996.535898",
"title": "A method to fuse two kinds of digital road maps",
"normalizedTitle": "A method to fuse two kinds of digital road maps",
"abstract": "Abstract: The acquisition of digital road maps is essential for automobile navigation. Digital road maps are used in Japan by the Digital Road Map Association (DRMA) based on 1/25,000 map. The digital map, however, is unsuitable for town driving. In this situation a more detailed digital road map is requested by the users. To get a detailed digital road map from a paper map is a very expensive operation. We proposed a new method to obtain a more precise digital road map using a more detailed general purpose digital map which has already been published. However, using this digital map directly for auto-navigation is impossible. The method we propose in this paper obtains more detailed road network data from the 1/10,000 map and from the DRMA data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract: The acquisition of digital road maps is essential for automobile navigation. Digital road maps are used in Japan by the Digital Road Map Association (DRMA) based on 1/25,000 map. The digital map, however, is unsuitable for town driving. In this situation a more detailed digital road map is requested by the users. To get a detailed digital road map from a paper map is a very expensive operation. We proposed a new method to obtain a more precise digital road map using a more detailed general purpose digital map which has already been published. However, using this digital map directly for auto-navigation is impossible. The method we propose in this paper obtains more detailed road network data from the 1/10,000 map and from the DRMA data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract: The acquisition of digital road maps is essential for automobile navigation. Digital road maps are used in Japan by the Digital Road Map Association (DRMA) based on 1/25,000 map. The digital map, however, is unsuitable for town driving. In this situation a more detailed digital road map is requested by the users. To get a detailed digital road map from a paper map is a very expensive operation. We proposed a new method to obtain a more precise digital road map using a more detailed general purpose digital map which has already been published. However, using this digital map directly for auto-navigation is impossible. The method we propose in this paper obtains more detailed road network data from the 1/10,000 map and from the DRMA data.",
"fno": "74360497",
"keywords": [
"Cartography Computerised Navigation Driver Information Systems Automobiles Automated Highways Town And Country Planning Digital Road Maps Automobile Navigation Japan Digital Road Map Association DRMA Town Driving Paper Map General Purpose Digital Map Autonavigation Road Network Data"
],
"authors": [
{
"affiliation": "Dept. of Inf. & Comput. Sci., Saitama Univ., Urawa, Japan",
"fullName": "Y. Ohsawa",
"givenName": "Y.",
"surname": "Ohsawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Inf. & Comput. Sci., Saitama Univ., Urawa, Japan",
"fullName": "M. Miyazaki",
"givenName": "M.",
"surname": "Miyazaki",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmcs",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1996-06-01T00:00:00",
"pubType": "proceedings",
"pages": "0497",
"year": "1996",
"issn": null,
"isbn": "0-8186-7819-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "74360493",
"articleId": "12OmNwGZNIN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "74360501",
"articleId": "12OmNCmGNZf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBqMDBv",
"title": "Computer, Consumer and Control, International Symposium on",
"acronym": "is3c",
"groupId": "1801670",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz4SOAL",
"doi": "10.1109/IS3C.2012.35",
"title": "Ubiquitous Navigation Based on Physical Maps and GPS",
"normalizedTitle": "Ubiquitous Navigation Based on Physical Maps and GPS",
"abstract": "This work is an improvement of a navigating method which uses only a GPS receiver and a physical map photographed by the embedded camera on a smart phone to build up a ubiquitous navigation system. Instead of the ¡§Two Point Referencing¡¨ approach, a ¡§Three Point Referencing¡¨ method is proposed in this paper, which trades off the need of the ¡§extra¡¨ third reference point for the improvement of positional accuracy and the requirement of a north-up physical map. Thus it can apply to any physical maps, in particular, tourist maps in theme parks or cities which may not necessarily be north-up maps. Indeed, it is reported that the ¡§Two Point Referencing¡¨ scheme was feasible only to 81% of the maps they found.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work is an improvement of a navigating method which uses only a GPS receiver and a physical map photographed by the embedded camera on a smart phone to build up a ubiquitous navigation system. Instead of the ¡§Two Point Referencing¡¨ approach, a ¡§Three Point Referencing¡¨ method is proposed in this paper, which trades off the need of the ¡§extra¡¨ third reference point for the improvement of positional accuracy and the requirement of a north-up physical map. Thus it can apply to any physical maps, in particular, tourist maps in theme parks or cities which may not necessarily be north-up maps. Indeed, it is reported that the ¡§Two Point Referencing¡¨ scheme was feasible only to 81% of the maps they found.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work is an improvement of a navigating method which uses only a GPS receiver and a physical map photographed by the embedded camera on a smart phone to build up a ubiquitous navigation system. Instead of the ¡§Two Point Referencing¡¨ approach, a ¡§Three Point Referencing¡¨ method is proposed in this paper, which trades off the need of the ¡§extra¡¨ third reference point for the improvement of positional accuracy and the requirement of a north-up physical map. Thus it can apply to any physical maps, in particular, tourist maps in theme parks or cities which may not necessarily be north-up maps. Indeed, it is reported that the ¡§Two Point Referencing¡¨ scheme was feasible only to 81% of the maps they found.",
"fno": "4655a101",
"keywords": [
"GPS",
"Map",
"Positioning",
"Navigation"
],
"authors": [
{
"affiliation": null,
"fullName": "Yu-Ren Wang",
"givenName": "Yu-Ren",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chun-Yi Tsai",
"givenName": "Chun-Yi",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yao-Chung Chang",
"givenName": "Yao-Chung",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Horng-Chang Yang",
"givenName": "Horng-Chang",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ming-Chiao Chen",
"givenName": "Ming-Chiao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "is3c",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "101-105",
"year": "2012",
"issn": null,
"isbn": "978-0-7695-4655-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4655a097",
"articleId": "12OmNwvVrNo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4655a106",
"articleId": "12OmNBOCWcE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2008/3268/0/3268a397",
"title": "Creating Local Geographies for Map-Based Cyber Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a397/12OmNApcufs",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/m2vip/1997/8025/0/80250169",
"title": "Inertial navigation aided with GPS information",
"doi": null,
"abstractUrl": "/proceedings-article/m2vip/1997/80250169/12OmNB06l37",
"parentPublication": {
"id": "proceedings/m2vip/1997/8025/0",
"title": "Mechatronics and Machine Vision in Practice, Annual Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2012/4736/0/4736a587",
"title": "Influence of Anchor Management on Anchored Navigation in Mobile Maps",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2012/4736a587/12OmNB8CiVg",
"parentPublication": {
"id": "proceedings/compsac/2012/4736/0",
"title": "2012 IEEE 36th Annual Computer Software and Applications Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieec/2009/3686/0/3686a599",
"title": "Research on Applications of LBS Based on Electronic Compass Assisted GPS",
"doi": null,
"abstractUrl": "/proceedings-article/ieec/2009/3686a599/12OmNBNM90z",
"parentPublication": {
"id": "proceedings/ieec/2009/3686/0",
"title": "2009 International Symposium on Information Engineering and Electronic Commerce (IEEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/2/4252b066",
"title": "One New Map Matching Model for Vehicle-Borne Navigation System",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252b066/12OmNCf1DnZ",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/culture-computing/2011/4546/0/4546a195",
"title": "Stroly: A Historic and Illustrated Maps Platform",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2011/4546a195/12OmNrAdsxf",
"parentPublication": {
"id": "proceedings/culture-computing/2011/4546/0",
"title": "International Conference on Culture and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecbs/2007/2772/0/27720397",
"title": "I-Navigate: Intelligent, Self-adapting Navigation Maps",
"doi": null,
"abstractUrl": "/proceedings-article/ecbs/2007/27720397/12OmNvH7fkP",
"parentPublication": {
"id": "proceedings/ecbs/2007/2772/0",
"title": "14th Annual IEEE International Conference and Workshops on the Engineering of Computer-Based Systems (ECBS'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mvhi/2010/4009/0/4009a416",
"title": "Integrating GIS, GPS Technologies for Designing Vehicle Monitor System",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a416/12OmNwwd2JE",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wimob/2008/3393/0/3393a221",
"title": "Sensitivity Analysis for GPS in Land-Vehicle Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/wimob/2008/3393a221/12OmNxQOjy4",
"parentPublication": {
"id": "proceedings/wimob/2008/3393/0",
"title": "2008 IEEE International Conference on Wireless and Mobile Computing, Networking and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/5/3336i582",
"title": "The Application of GPS/GIS Navigation and Positioning System in Cross-Country Orienteering",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336i582/12OmNzICEMK",
"parentPublication": {
"id": "proceedings/csse/2008/3336/5",
"title": "2008 International Conference on Computer Science and Software Engineering (CSSE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmESLQ7F2U",
"doi": "10.1109/ICCV48922.2021.01169",
"title": "Beyond Road Extraction: A Dataset for Map Update using Aerial Images",
"normalizedTitle": "Beyond Road Extraction: A Dataset for Map Update using Aerial Images",
"abstract": "The increasing availability of satellite and aerial imagery has sparked substantial interest in automatically updating street maps by processing aerial images. Until now, the community has largely focused on road extraction, where road networks are inferred from scratch from an aerial image. However, given that relatively high-quality maps exist in most parts of the world, in practice, inference approaches must be applied to update existing maps rather than infer new ones. With recent road extraction methods showing high accuracy, we argue that it is time to transition to the more practical map update task, where an existing map is updated by adding, removing, and shifting roads, without introducing errors in parts of the existing map that remain up-to-date. In this paper, we develop a new dataset called MUNO21 for the map update task, and show that it poses several new and interesting research challenges. We evaluate several state-of-the-art road extraction methods on MUNO21, and find that substantial further improvements in accuracy will be needed to realize automatic map update.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The increasing availability of satellite and aerial imagery has sparked substantial interest in automatically updating street maps by processing aerial images. Until now, the community has largely focused on road extraction, where road networks are inferred from scratch from an aerial image. However, given that relatively high-quality maps exist in most parts of the world, in practice, inference approaches must be applied to update existing maps rather than infer new ones. With recent road extraction methods showing high accuracy, we argue that it is time to transition to the more practical map update task, where an existing map is updated by adding, removing, and shifting roads, without introducing errors in parts of the existing map that remain up-to-date. In this paper, we develop a new dataset called MUNO21 for the map update task, and show that it poses several new and interesting research challenges. We evaluate several state-of-the-art road extraction methods on MUNO21, and find that substantial further improvements in accuracy will be needed to realize automatic map update.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The increasing availability of satellite and aerial imagery has sparked substantial interest in automatically updating street maps by processing aerial images. Until now, the community has largely focused on road extraction, where road networks are inferred from scratch from an aerial image. However, given that relatively high-quality maps exist in most parts of the world, in practice, inference approaches must be applied to update existing maps rather than infer new ones. With recent road extraction methods showing high accuracy, we argue that it is time to transition to the more practical map update task, where an existing map is updated by adding, removing, and shifting roads, without introducing errors in parts of the existing map that remain up-to-date. In this paper, we develop a new dataset called MUNO21 for the map update task, and show that it poses several new and interesting research challenges. We evaluate several state-of-the-art road extraction methods on MUNO21, and find that substantial further improvements in accuracy will be needed to realize automatic map update.",
"fno": "281200l1885",
"keywords": [
"Computer Vision",
"Satellites",
"Roads",
"Benchmark Testing",
"Trajectory",
"Topology",
"Task Analysis",
"Vision Applications And Systems"
],
"authors": [
{
"affiliation": "MIT CSAIL",
"fullName": "Favyen Bastani",
"givenName": "Favyen",
"surname": "Bastani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT CSAIL",
"fullName": "Sam Madden",
"givenName": "Sam",
"surname": "Madden",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "11885-11894",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200l1875",
"articleId": "1BmEUe5qD2E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200l1895",
"articleId": "1BmJDSxT48E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391b689",
"title": "Enhancing Road Maps by Parsing Aerial Images Around the World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b689/12OmNBOCWeM",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a448",
"title": "Road Change Detection from Multi-Spectral Aerial Data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a448/12OmNvlg8hk",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/comgeo/2013/5012/0/06602035",
"title": "Road Segmentation in Aerial Images by Exploiting Road Vector Data",
"doi": null,
"abstractUrl": "/proceedings-article/comgeo/2013/06602035/12OmNyen1kQ",
"parentPublication": {
"id": "proceedings/comgeo/2013/5012/0",
"title": "2013 4th International Conference on Computing for Geospatial Research & Application (COM.Geo)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d458",
"title": "DeepRoadMapper: Extracting Road Topology from Aerial Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d458/12OmNyv7m8s",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acv/1992/2840/0/00240327",
"title": "Interactive road finding for aerial images",
"doi": null,
"abstractUrl": "/proceedings-article/acv/1992/00240327/12OmNzxgHzK",
"parentPublication": {
"id": "proceedings/acv/1992/2840/0",
"title": "Proceedings IEEE Workshop on Applications of Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06774478",
"title": "Drawing Road Networks with Mental Maps",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06774478/13rRUwbs2b5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e720",
"title": "RoadTracer: Automatic Extraction of Road Networks from Aerial Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e720/17D45WgziOC",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2010/4236/0/05633020",
"title": "A Road Location Estimating Method by Many Trajectories to Realize Rapid Map Survey for a Car Navigation System",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2010/05633020/183rAfmup6p",
"parentPublication": {
"id": "proceedings/bwcca/2010/4236/0",
"title": "2010 International Conference on Broadband, Wireless Computing, Communication and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679461",
"title": "Road Segment Interpolation for Incomplete Road Data",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679461/18Xkis4mchy",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101626",
"title": "Automatic Calibration of Road Intersection Topology using Trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101626/1kaMMZBeWZi",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G89Cfiy0bC",
"title": "2022 23rd IEEE International Conference on Mobile Data Management (MDM)",
"acronym": "mdm",
"groupId": "1000468",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G89KdgyIgw",
"doi": "10.1109/MDM55031.2022.00052",
"title": "A Geospatial Method for Detecting Map-Based Road Segment Discrepancies",
"normalizedTitle": "A Geospatial Method for Detecting Map-Based Road Segment Discrepancies",
"abstract": "Today, people's lives are enriched by the integration of electronic maps via smartphones. Electronic maps are required for a variety of commercial activities, such as catering, movie viewing, and tourism. Route planning and navigation are particularly intrinsically linked to electronic maps. As a result, it is critical that the roads on the electronic map are complete and accurate. At the present time, there are discrepancies between the map roads of various providers. This paper evaluates the roads on various map providers' maps. Due to the varied terrain depicted on the map, assessing the road properties can be challenging. Additionally, roads of varying thicknesses exist within a tile image, making it difficult to quantify the map's road lengths. This paper proposes a method for extracting road segments using an image binarization technique and employs edge erosion to assist in automatically computing the length of roads within maps. Throughout the paper, we provide comparison and statistical analysis on using our proposed road length detection model across map providers. Results show that our detection model can identify road length accurately and hence provide an overall measure of quality of maps.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Today, people's lives are enriched by the integration of electronic maps via smartphones. Electronic maps are required for a variety of commercial activities, such as catering, movie viewing, and tourism. Route planning and navigation are particularly intrinsically linked to electronic maps. As a result, it is critical that the roads on the electronic map are complete and accurate. At the present time, there are discrepancies between the map roads of various providers. This paper evaluates the roads on various map providers' maps. Due to the varied terrain depicted on the map, assessing the road properties can be challenging. Additionally, roads of varying thicknesses exist within a tile image, making it difficult to quantify the map's road lengths. This paper proposes a method for extracting road segments using an image binarization technique and employs edge erosion to assist in automatically computing the length of roads within maps. Throughout the paper, we provide comparison and statistical analysis on using our proposed road length detection model across map providers. Results show that our detection model can identify road length accurately and hence provide an overall measure of quality of maps.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Today, people's lives are enriched by the integration of electronic maps via smartphones. Electronic maps are required for a variety of commercial activities, such as catering, movie viewing, and tourism. Route planning and navigation are particularly intrinsically linked to electronic maps. As a result, it is critical that the roads on the electronic map are complete and accurate. At the present time, there are discrepancies between the map roads of various providers. This paper evaluates the roads on various map providers' maps. Due to the varied terrain depicted on the map, assessing the road properties can be challenging. Additionally, roads of varying thicknesses exist within a tile image, making it difficult to quantify the map's road lengths. This paper proposes a method for extracting road segments using an image binarization technique and employs edge erosion to assist in automatically computing the length of roads within maps. Throughout the paper, we provide comparison and statistical analysis on using our proposed road length detection model across map providers. Results show that our detection model can identify road length accurately and hence provide an overall measure of quality of maps.",
"fno": "517600a230",
"keywords": [
"Cartography",
"Driver Information Systems",
"Feature Extraction",
"Geographic Information Systems",
"Image Segmentation",
"Road Traffic",
"Road Vehicles",
"Roads",
"Statistical Analysis",
"Electronic Map",
"Map Roads",
"Map Providers",
"Road Properties",
"Road Segments",
"Road Length Detection Model",
"Map Based Road Segment Discrepancies",
"Image Segmentation",
"Statistical Analysis",
"Navigation",
"Roads",
"Computational Modeling",
"Image Edge Detection",
"Length Measurement",
"Map",
"Road",
"Comparison",
"Binarization",
"Edge Erosion"
],
"authors": [
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Jiawei Yao",
"givenName": "Jiawei",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Eyhab Al-Masri",
"givenName": "Eyhab",
"surname": "Al-Masri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Mohamed Ali",
"givenName": "Mohamed",
"surname": "Ali",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Corporation,Redmond,USA",
"fullName": "Vashutosh Agrawal",
"givenName": "Vashutosh",
"surname": "Agrawal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Corporation,Redmond,USA",
"fullName": "Ming Tan",
"givenName": "Ming",
"surname": "Tan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Corporation,Redmond,USA",
"fullName": "Harsh Govind",
"givenName": "Harsh",
"surname": "Govind",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Adel Sabour",
"givenName": "Adel",
"surname": "Sabour",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Abdulrahman Salama",
"givenName": "Abdulrahman",
"surname": "Salama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Daniel Jiang",
"givenName": "Daniel",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Reuben Keller",
"givenName": "Reuben",
"surname": "Keller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering and Technology, University of Washington,Tacoma,USA",
"fullName": "Dino Jazvin",
"givenName": "Dino",
"surname": "Jazvin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Corporation,Redmond,USA",
"fullName": "Ravi Prakash",
"givenName": "Ravi",
"surname": "Prakash",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Corporation,Redmond,USA",
"fullName": "Egor Maresov",
"givenName": "Egor",
"surname": "Maresov",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mdm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "230-237",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5176-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "517600a222",
"articleId": "1G89F0advDq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "517600a238",
"articleId": "1G89Hu3DZU4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2009/3725/0/3725a838",
"title": "A Method for Automatically Extracting Road Layers from Raster Maps",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2009/3725a838/12OmNANTArL",
"parentPublication": {
"id": "proceedings/icdar/2009/3725/0",
"title": "2009 10th International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2013/5096/0/5096a432",
"title": "Hierarchical Segment Learning Method for Road Objects Extraction and Classification",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2013/5096a432/12OmNAWH9ym",
"parentPublication": {
"id": "proceedings/cse/2013/5096/0",
"title": "2013 IEEE 16th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2018/3649/0/364901a426",
"title": "A Map Matching Algorithm for Complex Road Conditions Based on Base Station Data",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2018/364901a426/12OmNApu5hN",
"parentPublication": {
"id": "proceedings/bigcomp/2018/3649/0",
"title": "2018 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2012/4779/0/4779a082",
"title": "Estimating the Number of Lanes on Rapid Road Map Survey System Using GPS Trajectories as Collective Intelligence",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2012/4779a082/12OmNxbEtIl",
"parentPublication": {
"id": "proceedings/nbis/2012/4779/0",
"title": "2012 15th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2011/4458/0/4458a547",
"title": "Early Evaluation of Road Width Estimation on Rapid Road Map Survey System Using GPS Trajectories as Collective Intelligence",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2011/4458a547/12OmNxymo74",
"parentPublication": {
"id": "proceedings/nbis/2011/4458/0",
"title": "2011 14th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028299",
"title": "An automatic road vector extraction method from maps",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028299/12OmNzXFoMa",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06774478",
"title": "Drawing Road Networks with Mental Maps",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06774478/13rRUwbs2b5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2010/4236/0/05633020",
"title": "A Road Location Estimating Method by Many Trajectories to Realize Rapid Map Survey for a Car Navigation System",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2010/05633020/183rAfmup6p",
"parentPublication": {
"id": "proceedings/bwcca/2010/4236/0",
"title": "2010 International Conference on Broadband, Wireless Computing, Communication and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1885",
"title": "Beyond Road Extraction: A Dataset for Map Update using Aerial Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1885/1BmESLQ7F2U",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2022/05/09347692",
"title": "RCIVMM: A Route Choice-Based Interactive Voting Map Matching Approach for Complex Urban Road Networks",
"doi": null,
"abstractUrl": "/journal/bd/2022/05/09347692/1qWHcWcel8I",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1kaMxDONP0Y",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"acronym": "icde",
"groupId": "1000178",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kaMMZBeWZi",
"doi": "10.1109/ICDE48307.2020.00145",
"title": "Automatic Calibration of Road Intersection Topology using Trajectories",
"normalizedTitle": "Automatic Calibration of Road Intersection Topology using Trajectories",
"abstract": "The inaccuracy of road intersection in digital road map easily brings serious effects on the mobile navigation and other applications. Massive traveling trajectories of thousands of vehicles enable frequent updating of road intersection topology. In this paper, we first expand the road intersection detection issue into a topology calibration problem for road intersection influence zone. Distinct from the existing road intersection update methods, we not only determine the location and coverage of road intersection, but figure out incorrect or missing turning paths within whole influence zone based on unmatched trajectories as compared to the existing map. The important challenges of calibration issue include that trajectories are mixing with exceptional data, and road intersections are of different sizes and shapes, etc. To address above challenges, we propose a three-phase calibration framework, called CITT. It is composed of trajectory quality improving, core zone detection, and topology calibration within road intersection influence zone. From such components it can automatically obtain high quality topology of road intersection influence zone. Extensive experiments compared with the state-of-the-art methods using trajectory data obtained from Didi Chuxing and Chicago campus shuttles demonstrate that CITT method has strong stability and robustness and significantly outperforms the existing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The inaccuracy of road intersection in digital road map easily brings serious effects on the mobile navigation and other applications. Massive traveling trajectories of thousands of vehicles enable frequent updating of road intersection topology. In this paper, we first expand the road intersection detection issue into a topology calibration problem for road intersection influence zone. Distinct from the existing road intersection update methods, we not only determine the location and coverage of road intersection, but figure out incorrect or missing turning paths within whole influence zone based on unmatched trajectories as compared to the existing map. The important challenges of calibration issue include that trajectories are mixing with exceptional data, and road intersections are of different sizes and shapes, etc. To address above challenges, we propose a three-phase calibration framework, called CITT. It is composed of trajectory quality improving, core zone detection, and topology calibration within road intersection influence zone. From such components it can automatically obtain high quality topology of road intersection influence zone. Extensive experiments compared with the state-of-the-art methods using trajectory data obtained from Didi Chuxing and Chicago campus shuttles demonstrate that CITT method has strong stability and robustness and significantly outperforms the existing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The inaccuracy of road intersection in digital road map easily brings serious effects on the mobile navigation and other applications. Massive traveling trajectories of thousands of vehicles enable frequent updating of road intersection topology. In this paper, we first expand the road intersection detection issue into a topology calibration problem for road intersection influence zone. Distinct from the existing road intersection update methods, we not only determine the location and coverage of road intersection, but figure out incorrect or missing turning paths within whole influence zone based on unmatched trajectories as compared to the existing map. The important challenges of calibration issue include that trajectories are mixing with exceptional data, and road intersections are of different sizes and shapes, etc. To address above challenges, we propose a three-phase calibration framework, called CITT. It is composed of trajectory quality improving, core zone detection, and topology calibration within road intersection influence zone. From such components it can automatically obtain high quality topology of road intersection influence zone. Extensive experiments compared with the state-of-the-art methods using trajectory data obtained from Didi Chuxing and Chicago campus shuttles demonstrate that CITT method has strong stability and robustness and significantly outperforms the existing methods.",
"fno": "09101626",
"keywords": [
"Calibration",
"Cartography",
"Data Mining",
"Object Detection",
"Road Traffic",
"Road Vehicles",
"Roads",
"Traffic Engineering Computing",
"Road Intersection Influence Zone",
"Road Intersection Topology",
"Digital Road Map",
"Road Intersection Detection Issue",
"Topology Calibration Problem",
"Road Intersection Update Methods",
"Mobile Navigation",
"Traveling Trajectories",
"Turning Paths",
"Three Phase Calibration Framework",
"CITT",
"Core Zone Detection",
"Roads",
"Trajectory",
"Topology",
"Turning",
"Calibration",
"Network Topology",
"Navigation",
"Road Intersection",
"Influence Zone",
"Core Zone",
"Quality Improving",
"Centerline Fitting"
],
"authors": [
{
"affiliation": "East China Normal University",
"fullName": "Lisheng Zhao",
"givenName": "Lisheng",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "East China Normal University",
"fullName": "Jiali Mao",
"givenName": "Jiali",
"surname": "Mao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "East China Normal University",
"fullName": "Min Pu",
"givenName": "Min",
"surname": "Pu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Didi Chuxing",
"fullName": "Guoping Liu",
"givenName": "Guoping",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "East China Normal University",
"fullName": "Cheqing Jin",
"givenName": "Cheqing",
"surname": "Jin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "East China Normal University",
"fullName": "Weining Qian",
"givenName": "Weining",
"surname": "Qian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "East China Normal University",
"fullName": "Aoying Zhou",
"givenName": "Aoying",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Didi Chuxing",
"fullName": "Xiang Wen",
"givenName": "Xiang",
"surname": "Wen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Didi Chuxing",
"fullName": "Runbo Hu",
"givenName": "Runbo",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Didi Chuxing",
"fullName": "Hua Chai",
"givenName": "Hua",
"surname": "Chai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icde",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1633-1644",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-2903-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09101499",
"articleId": "1kaMDxXmqkw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09101784",
"articleId": "1kaMGB3kDJe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icoin/2018/2290/0/08343159",
"title": "Reservation-based cooperative traffic management at an intersection of multi-lane roads",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2018/08343159/12OmNAndik0",
"parentPublication": {
"id": "proceedings/icoin/2018/2290/0",
"title": "2018 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imvip/2011/0230/0/06167880",
"title": "Comparison of Camera Calibration Techniques for a Portable Mobile Mapping System",
"doi": null,
"abstractUrl": "/proceedings-article/imvip/2011/06167880/12OmNwcCIXS",
"parentPublication": {
"id": "proceedings/imvip/2011/0230/0",
"title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d458",
"title": "DeepRoadMapper: Extracting Road Topology from Aerial Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d458/12OmNyv7m8s",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328536",
"title": "Distributed Algorithm for Geographic Opportunistic Routing in VANETs at Road Intersection",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328536/17D45XeKgpv",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0",
"title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7242",
"title": "Topology Preserving Local Road Network Estimation from Single Onboard Camera Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7242/1H0Nq5J2rHa",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2022/7180/0/718000a264",
"title": "SSTGCN: A Deep Learning Framework for Road Intersection Similarity Learning",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2022/718000a264/1JeEm3a0iNq",
"parentPublication": {
"id": "proceedings/mass/2022/7180/0",
"title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020759",
"title": "Learning Latent Road Correlations from Trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020759/1KfTaomjtw4",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2019/12/08747454",
"title": "Graph Filter: Enabling Efficient Topology Calibration",
"doi": null,
"abstractUrl": "/journal/td/2019/12/08747454/1bcHvaYGOUU",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2019/3363/0/336300a288",
"title": "Road Intersection Detection Based on Direction Ratio Statistics Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2019/336300a288/1ckrLvYzXxe",
"parentPublication": {
"id": "proceedings/mdm/2019/3363/0",
"title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900a985",
"title": "Enabling Self-defined Navigation on Road Graph via Double Rewarded Generalized VIN",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900a985/1t7mSoDrEOI",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzwpUa6",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"acronym": "icmtma",
"groupId": "1002837",
"volume": "1",
"displayVolume": "1",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB836N0",
"doi": "10.1109/ICMTMA.2010.158",
"title": "A Study of Smoothing Implementation in Adaptive Federate Interpolation Based on NURBS Curve",
"normalizedTitle": "A Study of Smoothing Implementation in Adaptive Federate Interpolation Based on NURBS Curve",
"abstract": "The paper proposes smoothing adaptive NURBS curve interpolation algorithm, which is based on the forward-looking technology acceleration and jerk constraints, to adaptively adjust feed rate according to the requirements of interpolation accuracy. NURBS curve is divided into curve segments by sharp corners, and by using the corresponding acceleration and 5 paragraphs S deceleration control which acceleration limits, feeding rate of each curve segment is re-planned. So smooth speed transition curve is got to enhance the accuracy and efficiency of NC machining.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The paper proposes smoothing adaptive NURBS curve interpolation algorithm, which is based on the forward-looking technology acceleration and jerk constraints, to adaptively adjust feed rate according to the requirements of interpolation accuracy. NURBS curve is divided into curve segments by sharp corners, and by using the corresponding acceleration and 5 paragraphs S deceleration control which acceleration limits, feeding rate of each curve segment is re-planned. So smooth speed transition curve is got to enhance the accuracy and efficiency of NC machining.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The paper proposes smoothing adaptive NURBS curve interpolation algorithm, which is based on the forward-looking technology acceleration and jerk constraints, to adaptively adjust feed rate according to the requirements of interpolation accuracy. NURBS curve is divided into curve segments by sharp corners, and by using the corresponding acceleration and 5 paragraphs S deceleration control which acceleration limits, feeding rate of each curve segment is re-planned. So smooth speed transition curve is got to enhance the accuracy and efficiency of NC machining.",
"fno": "3962a357",
"keywords": [
"NURBS",
"Interpolation",
"Sharp Corners",
"S Curve Velocity Profile",
"Smoothing Implementation"
],
"authors": [
{
"affiliation": null,
"fullName": "An-Jiang Cai",
"givenName": "An-Jiang",
"surname": "Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shi-Hong Guo",
"givenName": "Shi-Hong",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hai-Tao Zhang",
"givenName": "Hai-Tao",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hong-Wei Guo",
"givenName": "Hong-Wei",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmtma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-03-01T00:00:00",
"pubType": "proceedings",
"pages": "357-360",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-3962-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3962a352",
"articleId": "12OmNz6iOi7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3962a361",
"articleId": "12OmNzYwbSA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2010/4286/1/4286a729",
"title": "Feedrate Profile Planning Based on Sensitive Points Identification in NURBS Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2010/4286a729/12OmNAndiph",
"parentPublication": {
"id": "proceedings/icdma/2010/4286/1",
"title": "2010 International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/3/3507c216",
"title": "A Computer Numerical Controlled System with NURBS Interpolator",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507c216/12OmNCctfhg",
"parentPublication": {
"id": "proceedings/csie/2009/3507/3",
"title": "Computer Science and Information Engineering, World Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a259",
"title": "Particle Swarm Optimization for NURBS Curve Fitting",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a259/12OmNCuDzuR",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2011/688/0/05954509",
"title": "Application of NURBS Curve Interpolation Algorithm in Steam Turbine Blade NC Machining",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2011/05954509/12OmNqJHFBS",
"parentPublication": {
"id": "proceedings/icic/2011/688/0",
"title": "2011 Fourth International Conference on Information and Computing (ICIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a073",
"title": "NURBS Curve Approximation Using Particle Swarm Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a073/12OmNx4yvFV",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a080",
"title": "Research on a New Linear Interpolation Algorithm of NURBS Curve",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a080/12OmNxwWoum",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/3/3583c273",
"title": "Research and Implementation of NURBS Real-Time and Look-Ahead Interpolation Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583c273/12OmNyGtjpT",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsem/2010/4223/1/4223a070",
"title": "Nurbs Interpolation Method for Complicated Curved Surface",
"doi": null,
"abstractUrl": "/proceedings-article/icsem/2010/4223a070/12OmNyL0Txw",
"parentPublication": {
"id": "proceedings/icsem/2010/4223/1",
"title": "2010 International Conference on System Science, Engineering Design and Manufacturing Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2007/2994/2/29940059",
"title": "Efficient Spline Interpolation Curve Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2007/29940059/12OmNyUWQXU",
"parentPublication": {
"id": "proceedings/iih-msp/2007/2994/2",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2018/8481/0/848100a097",
"title": "Linear Motor Platform Contouring Control Based on NURBS Curve Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2018/848100a097/17D45XwUAKx",
"parentPublication": {
"id": "proceedings/icmcce/2018/8481/0",
"title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBSBk6A",
"title": "2014 Ninth International Conference on P2P, Parallel, Grid, Cloud and Internet Computing (3PGCIC)",
"acronym": "3pgcic",
"groupId": "1800224",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCdk2uJ",
"doi": "10.1109/3PGCIC.2014.96",
"title": "Optimizational Method of HBase Multi-dimensional Data Query Based on Hilbert Space-Filling Curve",
"normalizedTitle": "Optimizational Method of HBase Multi-dimensional Data Query Based on Hilbert Space-Filling Curve",
"abstract": "Base distributed database technology has been widely used in missive data processing. The problem of the efficiency of multi-dimensional data query which is caused by its single primary key indexing becomes more apparent. This paper proposed and implemented a multi-dimensional query method based on Hilbert space-filling curve. Using the Hilbert space filling curve to make the multi-dimensional data space to be one-dimensional lossless compression, on the basis of mapping the query conditions to the multi-dimensional space, and then using the subspace match to generate Hilbert segment, thereby convert into a single dimension query. Finally, the experiments prove that this method can query the keyword of multi-dimensional space more efficiently with the massive data and has a good load balancing performance. And this method can be more effective to avoid the issue of the server cluster hotspot.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Base distributed database technology has been widely used in missive data processing. The problem of the efficiency of multi-dimensional data query which is caused by its single primary key indexing becomes more apparent. This paper proposed and implemented a multi-dimensional query method based on Hilbert space-filling curve. Using the Hilbert space filling curve to make the multi-dimensional data space to be one-dimensional lossless compression, on the basis of mapping the query conditions to the multi-dimensional space, and then using the subspace match to generate Hilbert segment, thereby convert into a single dimension query. Finally, the experiments prove that this method can query the keyword of multi-dimensional space more efficiently with the massive data and has a good load balancing performance. And this method can be more effective to avoid the issue of the server cluster hotspot.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Base distributed database technology has been widely used in missive data processing. The problem of the efficiency of multi-dimensional data query which is caused by its single primary key indexing becomes more apparent. This paper proposed and implemented a multi-dimensional query method based on Hilbert space-filling curve. Using the Hilbert space filling curve to make the multi-dimensional data space to be one-dimensional lossless compression, on the basis of mapping the query conditions to the multi-dimensional space, and then using the subspace match to generate Hilbert segment, thereby convert into a single dimension query. Finally, the experiments prove that this method can query the keyword of multi-dimensional space more efficiently with the massive data and has a good load balancing performance. And this method can be more effective to avoid the issue of the server cluster hotspot.",
"fno": "4171a469",
"keywords": [
"Distributed Databases",
"Servers",
"Encoding",
"Educational Institutions",
"Load Management",
"Indexes",
"Availability",
"Hilbert Space Filling Curve",
"Hadoop",
"Component",
"Multi Dimensional Data Query"
],
"authors": [
{
"affiliation": null,
"fullName": "Qingcheng Li",
"givenName": "Qingcheng",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ye Lu",
"givenName": "Ye",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaoli Gong",
"givenName": "Xiaoli",
"surname": "Gong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jin Zhang",
"givenName": "Jin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3pgcic",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-11-01T00:00:00",
"pubType": "proceedings",
"pages": "469-474",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4171-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4171a463",
"articleId": "12OmNCcKQv8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4171a475",
"articleId": "12OmNyeWdOg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1994/6952/1/00413392",
"title": "A class of fast algorithms for the Peano-Hilbert space-filling curve",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413392/12OmNB1NVNn",
"parentPublication": {
"id": "proceedings/icip/1994/6952/3",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scc/2014/5066/0/5066a131",
"title": "A Density-Based Space Filling Curve for Location Privacy-Preserving",
"doi": null,
"abstractUrl": "/proceedings-article/scc/2014/5066a131/12OmNBcShUv",
"parentPublication": {
"id": "proceedings/scc/2014/5066/0",
"title": "2014 IEEE International Conference on Services Computing (SCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2010/4339/0/4339a085",
"title": "A k-Closest-Pair Query Algorithm Based on Grid Partition of Hilbert Curve",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2010/4339a085/12OmNCwCLp6",
"parentPublication": {
"id": "proceedings/icicse/2010/4339/0",
"title": "2010 Fifth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460162",
"title": "Mapping high dimensional features onto Hilbert curve: Applying to fast image retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460162/12OmNqBtiSg",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpp/2003/2017/0/20170099",
"title": "Tensor Product Formulation for Hilbert Space-Filling Curves",
"doi": null,
"abstractUrl": "/proceedings-article/icpp/2003/20170099/12OmNxwENsq",
"parentPublication": {
"id": "proceedings/icpp/2003/2017/0",
"title": "2003 International Conference on Parallel Processing, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicis/2011/1561/0/06063312",
"title": "An Approximate Nearest Neighbor Query Algorithm Based on Hilbert Curve",
"doi": null,
"abstractUrl": "/proceedings-article/icicis/2011/06063312/12OmNyugyLg",
"parentPublication": {
"id": "proceedings/icicis/2011/1561/0",
"title": "2011 International Conference on Internet Computing and Information Services (ICICIS 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2001/01/k0124",
"title": "Analysis of the Clustering Properties of the Hilbert Space-Filling Curve",
"doi": null,
"abstractUrl": "/journal/tk/2001/01/k0124/13rRUwhHcJE",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2018/5520/0/552000b236",
"title": "Onion Curve: A Space Filling Curve with Near-Optimal Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2018/552000b236/14Fq0Ud02xH",
"parentPublication": {
"id": "proceedings/icde/2018/5520/0",
"title": "2018 IEEE 34th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2017/2652/0/2652b493",
"title": "Differential Private-Hilbert: Data Publication Using Hilbert Curve Spatial Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2017/2652b493/17D45W9KVJL",
"parentPublication": {
"id": "proceedings/csci/2017/2652/0",
"title": "2017 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcloud/2019/5506/0/09091395",
"title": "Efficient Spatial Big Data Storage and Query in HBase",
"doi": null,
"abstractUrl": "/proceedings-article/smartcloud/2019/09091395/1jPb8gAAjFC",
"parentPublication": {
"id": "proceedings/smartcloud/2019/5506/0",
"title": "2019 IEEE International Conference on Smart Cloud (SmartCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqBtiEH",
"title": "Computer Science-Technology and Applications, International Forum on",
"acronym": "ifcsta",
"groupId": "1003083",
"volume": "2",
"displayVolume": "2",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrIrPfJ",
"doi": "10.1109/IFCSTA.2009.181",
"title": "A Remote Sensing Images Fusion Based on the NLEMD and Local Contrast",
"normalizedTitle": "A Remote Sensing Images Fusion Based on the NLEMD and Local Contrast",
"abstract": "A remote sensing images fusion algorithm based on the NLEMD (NLEMD, neighborhood limited empirical mode decomposition) and local contrast is presented in this paper. At first the original image is performed a multi-resolution decomposition by using NLEMD. Then a fusing algorithm is presented according to the truth that the human visual system is sensitive to the local contrast. During NLEMD, the local average stationarity is used to get the optimal local average of image, which is avoided extracting the extrema of image in the traditional EMD. The experiment results show that our algorithm can reflect more complex textures than the DWT method could. This algorithm can get better fused image.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A remote sensing images fusion algorithm based on the NLEMD (NLEMD, neighborhood limited empirical mode decomposition) and local contrast is presented in this paper. At first the original image is performed a multi-resolution decomposition by using NLEMD. Then a fusing algorithm is presented according to the truth that the human visual system is sensitive to the local contrast. During NLEMD, the local average stationarity is used to get the optimal local average of image, which is avoided extracting the extrema of image in the traditional EMD. The experiment results show that our algorithm can reflect more complex textures than the DWT method could. This algorithm can get better fused image.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A remote sensing images fusion algorithm based on the NLEMD (NLEMD, neighborhood limited empirical mode decomposition) and local contrast is presented in this paper. At first the original image is performed a multi-resolution decomposition by using NLEMD. Then a fusing algorithm is presented according to the truth that the human visual system is sensitive to the local contrast. During NLEMD, the local average stationarity is used to get the optimal local average of image, which is avoided extracting the extrema of image in the traditional EMD. The experiment results show that our algorithm can reflect more complex textures than the DWT method could. This algorithm can get better fused image.",
"fno": "3930b246",
"keywords": [
"Empirical Mode Decomposition",
"Neighborhood Limit",
"Local Average Stationarity",
"Image Fusion",
"Intrinsic Mode Function"
],
"authors": [
{
"affiliation": null,
"fullName": "Zejun Zhang",
"givenName": "Zejun",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaowei Chen",
"givenName": "Xiaowei",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ifcsta",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-12-01T00:00:00",
"pubType": "proceedings",
"pages": "246-249",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3930-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3930b243",
"articleId": "12OmNA0dMMh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3930b250",
"articleId": "12OmNz61dbx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2002/1695/3/169530915",
"title": "Entropy Optimized Contrast Stretch to Enhance Remote Sensing Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169530915/12OmNAlvHCY",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831314",
"title": "Shape preserving local contrast enhancement",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831314/12OmNBUS7al",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icds/2009/3526/0/3526a294",
"title": "Local Contrast Segmentation to Binarize Images",
"doi": null,
"abstractUrl": "/proceedings-article/icds/2009/3526a294/12OmNBlofSQ",
"parentPublication": {
"id": "proceedings/icds/2009/3526/0",
"title": "International Conference on the Digital Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2007/3122/0/3122a493",
"title": "Contrast Enhancement Scheme Integrating Global and Local Contrast Equalization Approaches",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2007/3122a493/12OmNqJHFAj",
"parentPublication": {
"id": "proceedings/sitis/2007/3122/0",
"title": "2007 Third International IEEE Conference on Signal-Image Technologies and Internet-Based System",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aici/2009/3816/3/3816c530",
"title": "NonSubsampled Contourlet Transform Combined with Energy Entropy for Remote Sensing Image Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/aici/2009/3816c530/12OmNvnwVq9",
"parentPublication": {
"id": "proceedings/aici/2009/3816/3",
"title": "2009 International Conference on Artificial Intelligence and Computational Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccct/2012/3149/0/06394687",
"title": "SVD Based Poor Contrast Improvement of Blurred Multispectral Remote Sensing Satellite Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccct/2012/06394687/12OmNwbLVry",
"parentPublication": {
"id": "proceedings/iccct/2012/3149/0",
"title": "2012 3rd International Conference on Computer and Communication Technology (ICCCT 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmit/2010/4008/2/4008b051",
"title": "Improvement of Image Contrast with Local Adaptation",
"doi": null,
"abstractUrl": "/proceedings-article/mmit/2010/4008b051/12OmNy50gap",
"parentPublication": {
"id": "proceedings/mmit/2010/4008/2",
"title": "MultiMedia and Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asia/2009/3910/0/3910a120",
"title": "Regional Contrast Fuzzy Enhancement for Remote Sensing Image Based on the Generalized Fuzzy Set in Nonsubsampled Contourlet Domain",
"doi": null,
"abstractUrl": "/proceedings-article/asia/2009/3910a120/12OmNylbozy",
"parentPublication": {
"id": "proceedings/asia/2009/3910/0",
"title": "2009 International Asia Symposium on Intelligent Interaction and Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isads/2005/8963/0/01452090",
"title": "Remote sensing image data fusion based on local deviation of wavelet packet transform",
"doi": null,
"abstractUrl": "/proceedings-article/isads/2005/01452090/12OmNzhELf5",
"parentPublication": {
"id": "proceedings/isads/2005/8963/0",
"title": "Proceedings. ISADS 2005. 2005 International Symposium on Autonomous Decentralized Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034d023",
"title": "Global and Local Contrast Adaptive Enhancement for Non-uniform Illumination Color Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034d023/12OmNzwHvp4",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy5hRcX",
"title": "Information Technology and Computer Science, International Conference on",
"acronym": "itcs",
"groupId": "1002810",
"volume": "1",
"displayVolume": "1",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwI8cbJ",
"doi": "10.1109/ITCS.2009.44",
"title": "Application of EMD in the Vehicle Braking Performance Inspection System",
"normalizedTitle": "Application of EMD in the Vehicle Braking Performance Inspection System",
"abstract": "To achieve the goal of full procedural detection of the changing of vehicle braking force, and to improve the accuracy and repeatability of the inspection system, after analyzing several commonly used data smoothing methods, we propose applying empirical modal decomposition to smoothing of car braking force data. Simulation and experimental results show that the method can weaken the effect caused by disturbance, and improve curve smoothness. It also can preserve the changing patterns of the original curve, which guarantees system detection accuracy and repeatability. This proves the effectiveness and feasibility of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To achieve the goal of full procedural detection of the changing of vehicle braking force, and to improve the accuracy and repeatability of the inspection system, after analyzing several commonly used data smoothing methods, we propose applying empirical modal decomposition to smoothing of car braking force data. Simulation and experimental results show that the method can weaken the effect caused by disturbance, and improve curve smoothness. It also can preserve the changing patterns of the original curve, which guarantees system detection accuracy and repeatability. This proves the effectiveness and feasibility of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To achieve the goal of full procedural detection of the changing of vehicle braking force, and to improve the accuracy and repeatability of the inspection system, after analyzing several commonly used data smoothing methods, we propose applying empirical modal decomposition to smoothing of car braking force data. Simulation and experimental results show that the method can weaken the effect caused by disturbance, and improve curve smoothness. It also can preserve the changing patterns of the original curve, which guarantees system detection accuracy and repeatability. This proves the effectiveness and feasibility of the proposed method.",
"fno": "3688a184",
"keywords": [
"Automobile Engineering",
"Braking Performance",
"Smoothing",
"Empirical Modal Decomposition"
],
"authors": [
{
"affiliation": null,
"fullName": "Guo Lanying",
"givenName": "Guo",
"surname": "Lanying",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Liang Bo",
"givenName": "Liang",
"surname": "Bo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhao Xiangmo",
"givenName": "Zhao",
"surname": "Xiangmo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dong Anguo",
"givenName": "Dong",
"surname": "Anguo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "itcs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-07-01T00:00:00",
"pubType": "proceedings",
"pages": "184-187",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3688-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3688a179",
"articleId": "12OmNBE7Muv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3688a188",
"articleId": "12OmNzxyiDr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2013/4932/0/4932a021",
"title": "A Method for Distinguishing the Braking Situation of the Vehicle in Vehicle-Pedestrian Accidents",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2013/4932a021/12OmNANBZtu",
"parentPublication": {
"id": "proceedings/icmtma/2013/4932/0",
"title": "2013 Fifth International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imccc/2011/4519/0/4519a966",
"title": "Research on Efficiency Optimization for Hydrostatic and Hydraulic Auxiliary Braking System of Tracked Vehicle",
"doi": null,
"abstractUrl": "/proceedings-article/imccc/2011/4519a966/12OmNAkWvpw",
"parentPublication": {
"id": "proceedings/imccc/2011/4519/0",
"title": "Instrumentation, Measurement, Computer, Communication and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/6/3736f458",
"title": "The Research of Regenerative Braking Control Strategy for Advanced Braking Force Distribution",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736f458/12OmNBbsieH",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/6",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2011/4350/0/4350b358",
"title": "Study on Braking Stability of Electro-mechanical Hybrid Braking System in Electric Vehicles Based on ECE Regulation",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2011/4350b358/12OmNC8dgih",
"parentPublication": {
"id": "proceedings/cdciem/2011/4350/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031a940",
"title": "Experimental Research on H8 Control for Regenerative Braking of Electric Vehicle",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031a940/12OmNCgJe7R",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031a524",
"title": "Automobile Simulation Research on Anti-lock Braking System Based on LabVIEW",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031a524/12OmNwHyZWU",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/1/3962a759",
"title": "Braking Force Distribution Strategy for HEV Based on Braking Strength",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962a759/12OmNwNwzLy",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/1",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2012/4639/0/4639a065",
"title": "A Series Regenerative Braking Control Strategy Based on Hybrid-Power",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2012/4639a065/12OmNx7ouRz",
"parentPublication": {
"id": "proceedings/cdciem/2012/4639/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/2/4151b124",
"title": "The Application of Fuzzy Logic in Regenerative Braking of EV",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151b124/12OmNxXUhQb",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/2",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccie/2010/4026/1/4026a439",
"title": "Simulation Study of H8 Control for Regenerative Braking of Electric Vehicle",
"doi": null,
"abstractUrl": "/proceedings-article/ccie/2010/4026a439/12OmNy5zsqC",
"parentPublication": {
"id": "proceedings/ccie/2010/4026/1",
"title": "Computing, Control and Industrial Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBp52yv",
"title": "2014 Seventh International Conference on Contemporary Computing (IC3)",
"acronym": "ic3",
"groupId": "1803947",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwoPtBp",
"doi": "10.1109/IC3.2014.6897237",
"title": "Multi-resolution Local extrema patterns using discrete wavelet transform",
"normalizedTitle": "Multi-resolution Local extrema patterns using discrete wavelet transform",
"abstract": "Content based image retrieval is grievous need of present scenario in digital imaging world. This work presents a new multi-scale content based image retrieval system which leverages the multi-resolution property of discrete wavelet transform (DWT) and the local information attribute of local extrema patterns (LEPs). Two level DWT is applied on images and wavelet coefficients are obtained for images, further the LEPs are collected from wavelet coefficients to extract the feature vector. The proposed method abbreviated as DWT+LEP and tested on two benchmark databases for validation and compared with local extrema patterns (LEPs), discrete wavelet transform (DWT), center-symmetric local binary pattern (CS LBP), local edge pattern for image retrieval (LEPINV), local edge pattern for segmentation (LEPSEG), local binary pattern (LBP) and block based local binary pattern (BLK LBP).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Content based image retrieval is grievous need of present scenario in digital imaging world. This work presents a new multi-scale content based image retrieval system which leverages the multi-resolution property of discrete wavelet transform (DWT) and the local information attribute of local extrema patterns (LEPs). Two level DWT is applied on images and wavelet coefficients are obtained for images, further the LEPs are collected from wavelet coefficients to extract the feature vector. The proposed method abbreviated as DWT+LEP and tested on two benchmark databases for validation and compared with local extrema patterns (LEPs), discrete wavelet transform (DWT), center-symmetric local binary pattern (CS LBP), local edge pattern for image retrieval (LEPINV), local edge pattern for segmentation (LEPSEG), local binary pattern (LBP) and block based local binary pattern (BLK LBP).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Content based image retrieval is grievous need of present scenario in digital imaging world. This work presents a new multi-scale content based image retrieval system which leverages the multi-resolution property of discrete wavelet transform (DWT) and the local information attribute of local extrema patterns (LEPs). Two level DWT is applied on images and wavelet coefficients are obtained for images, further the LEPs are collected from wavelet coefficients to extract the feature vector. The proposed method abbreviated as DWT+LEP and tested on two benchmark databases for validation and compared with local extrema patterns (LEPs), discrete wavelet transform (DWT), center-symmetric local binary pattern (CS LBP), local edge pattern for image retrieval (LEPINV), local edge pattern for segmentation (LEPSEG), local binary pattern (LBP) and block based local binary pattern (BLK LBP).",
"fno": "06897237",
"keywords": [
"Discrete Wavelet Transforms",
"Feature Extraction",
"Image Retrieval",
"Histograms",
"Local Binary Pattern",
"Multi Resolution",
"Discrete Wavelet Transform",
"Local Exterma Pattern",
"Local Edge Pattern"
],
"authors": [
{
"affiliation": "Department of Mathematics, Indian Institute of Technology Roorkee, India",
"fullName": "Manisha Verma",
"givenName": "Manisha",
"surname": "Verma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Indian Institute of Technology Roorkee, India",
"fullName": "Balasubramanian Raman",
"givenName": "Balasubramanian",
"surname": "Raman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical and Computer Engineering, University of Windsor, Ontario, Canada",
"fullName": "Subrahmanyam Murala",
"givenName": "Subrahmanyam",
"surname": "Murala",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ic3",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "577-582",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-5172-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06897236",
"articleId": "12OmNx1qV1k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06897238",
"articleId": "12OmNAoDigh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsip/2014/5100/0/5100a231",
"title": "Performance Evaluation of Various Feature Extraction Techniques with Special Reference to Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icsip/2014/5100a231/12OmNBa2iAb",
"parentPublication": {
"id": "proceedings/icsip/2014/5100/0",
"title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acct/2013/4941/0/06524275",
"title": "Analysis of Multispectral Image Using Discrete Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/acct/2013/06524275/12OmNCcbE9D",
"parentPublication": {
"id": "proceedings/acct/2013/4941/0",
"title": "2013 Third International Conference on Advanced Computing & Communication Technologies (ACCT 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2009/3781/0/05090868",
"title": "A flexible floating-point wavelet transform and wavelet packet processor",
"doi": null,
"abstractUrl": "/proceedings-article/date/2009/05090868/12OmNqEAT6t",
"parentPublication": {
"id": "proceedings/date/2009/3781/0",
"title": "2009 Design, Automation & Test in Europe Conference & Exhibition (DATE'09)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2015/8342/0/07057914",
"title": "New interpolation method based on combination of Discrete cosine transform and wavelet transform",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2015/07057914/12OmNvAiSNi",
"parentPublication": {
"id": "proceedings/icoin/2015/8342/0",
"title": "2015 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/2/01326413",
"title": "A new complex wavelet transform by using RI-spline wavelet",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326413/12OmNvDI3Mj",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/2",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1994/5320/0/00287872",
"title": "A parallel implementation of the discrete wavelet transform",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1994/00287872/12OmNwc3wrS",
"parentPublication": {
"id": "proceedings/ssst/1994/5320/0",
"title": "Proceedings of 26th Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmete/2016/3411/0/07938986",
"title": "Satellite Image Enhancement using Discrete Wavelet Transform, Singular Value Decomposition and its Noise Performance Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icmete/2016/07938986/12OmNwx3QcQ",
"parentPublication": {
"id": "proceedings/icmete/2016/3411/0",
"title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2016/1611/0/07822742",
"title": "A method of removing Ocular Artifacts from EEG using Discrete Wavelet Transform and Kalman Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822742/12OmNyrIaK9",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2016/11/07422119",
"title": "A Fast Discrete Wavelet Transform Using Hybrid Parallelism on GPUs",
"doi": null,
"abstractUrl": "/journal/td/2016/11/07422119/13rRUwdIOUr",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122693",
"title": "Efficient Local Statistical Analysis via Integral Histograms with Discrete Wavelet Transform",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122693/13rRUyfbwqI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "14Fq0UgGPey",
"title": "2018 IEEE 34th International Conference on Data Engineering (ICDE)",
"acronym": "icde",
"groupId": "1000178",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "14Fq0Ud02xH",
"doi": "10.1109/ICDE.2018.00119",
"title": "Onion Curve: A Space Filling Curve with Near-Optimal Clustering",
"normalizedTitle": "Onion Curve: A Space Filling Curve with Near-Optimal Clustering",
"abstract": "Space filling curves (SFCs) are widely used in the design of indexes for spatial and temporal data. Clustering is a key metric for an SFC, that measures how well the curve preserves locality in mapping from higher dimensions to a single dimension. We present the onion curve, an SFC whose clustering performance is provably close to the optimal for cube and near-cube shaped query sets. We show that in contrast, the clustering performance of the widely used Hilbert curve can be far from optimal, even for cube-shaped queries. Since clustering performance is critical to the efficiency of multi-dimensional indexes based on the SFC, the onion curve can deliver improved performance for data structures for multi-dimensional data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Space filling curves (SFCs) are widely used in the design of indexes for spatial and temporal data. Clustering is a key metric for an SFC, that measures how well the curve preserves locality in mapping from higher dimensions to a single dimension. We present the onion curve, an SFC whose clustering performance is provably close to the optimal for cube and near-cube shaped query sets. We show that in contrast, the clustering performance of the widely used Hilbert curve can be far from optimal, even for cube-shaped queries. Since clustering performance is critical to the efficiency of multi-dimensional indexes based on the SFC, the onion curve can deliver improved performance for data structures for multi-dimensional data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Space filling curves (SFCs) are widely used in the design of indexes for spatial and temporal data. Clustering is a key metric for an SFC, that measures how well the curve preserves locality in mapping from higher dimensions to a single dimension. We present the onion curve, an SFC whose clustering performance is provably close to the optimal for cube and near-cube shaped query sets. We show that in contrast, the clustering performance of the widely used Hilbert curve can be far from optimal, even for cube-shaped queries. Since clustering performance is critical to the efficiency of multi-dimensional indexes based on the SFC, the onion curve can deliver improved performance for data structures for multi-dimensional data.",
"fno": "552000b236",
"keywords": [
"Data Structures",
"Pattern Clustering",
"Query Processing",
"SFC",
"Onion Curve",
"Space Filling Curve",
"Near Optimal Clustering",
"Spatial Data",
"Temporal Data",
"Clustering Performance",
"Near Cube Shaped Query Sets",
"Cube Shaped Queries",
"Hilbert Curve",
"Spatial Databases",
"Measurement",
"Data Structures",
"Indexing",
"Distributed Databases",
"Shape",
"Query Processing",
"Indexing",
"And Optimization"
],
"authors": [
{
"affiliation": null,
"fullName": "Pan Xu",
"givenName": "Pan",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cuong Nguyen",
"givenName": "Cuong",
"surname": "Nguyen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Srikanta Tirthapura",
"givenName": "Srikanta",
"surname": "Tirthapura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icde",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1236-1239",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-5520-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "552000b232",
"articleId": "14Fq0UR1tSR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "552000b240",
"articleId": "14Fq0YafsVw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdps/2007/0909/0/04227943",
"title": "Inverse Space-Filling Curve Partitioning of a Global Ocean Model",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2007/04227943/12OmNBLdKGC",
"parentPublication": {
"id": "proceedings/ipdps/2007/0909/0",
"title": "2007 IEEE International Parallel and Distributed Processing Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scc/2014/5066/0/5066a131",
"title": "A Density-Based Space Filling Curve for Location Privacy-Preserving",
"doi": null,
"abstractUrl": "/proceedings-article/scc/2014/5066a131/12OmNBcShUv",
"parentPublication": {
"id": "proceedings/scc/2014/5066/0",
"title": "2014 IEEE International Conference on Services Computing (SCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3pgcic/2014/4171/0/4171a469",
"title": "Optimizational Method of HBase Multi-dimensional Data Query Based on Hilbert Space-Filling Curve",
"doi": null,
"abstractUrl": "/proceedings-article/3pgcic/2014/4171a469/12OmNCdk2uJ",
"parentPublication": {
"id": "proceedings/3pgcic/2014/4171/0",
"title": "2014 Ninth International Conference on P2P, Parallel, Grid, Cloud and Internet Computing (3PGCIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2016/9005/0/07840585",
"title": "Cache-oblivious loops based on a novel space-filling curve",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2016/07840585/12OmNqFa5oA",
"parentPublication": {
"id": "proceedings/big-data/2016/9005/0",
"title": "2016 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2009/4027/0/4027a260",
"title": "A Clustering Algorithm Based on Grid Partition of Space-Filling Curve",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2009/4027a260/12OmNrFTr4h",
"parentPublication": {
"id": "proceedings/icicse/2009/4027/0",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2012/4675/0/4675b295",
"title": "A Lower Bound on Proximity Preservation by Space Filling Curves",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2012/4675b295/12OmNrkT7LE",
"parentPublication": {
"id": "proceedings/ipdps/2012/4675/0",
"title": "Parallel and Distributed Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2008/3434/0/3434a311",
"title": "OID: Optimized Information Discovery Using Space Filling Curves in P2P Overlay Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2008/3434a311/12OmNxwncnQ",
"parentPublication": {
"id": "proceedings/icpads/2008/3434/0",
"title": "2008 14th IEEE International Conference on Parallel and Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-css-icess/2015/8937/0/07336274",
"title": "A General Space-filling Curve Algorithm for Partitioning 2D Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-css-icess/2015/07336274/12OmNyQ7G3T",
"parentPublication": {
"id": "proceedings/hpcc-css-icess/2015/8937/0",
"title": "2015 IEEE 17th International Conference on High Performance Computing and Communications (HPCC), 2015 IEEE 7th International Symposium on Cyberspace Safety and Security (CSS) and 2015 IEEE 12th International Conf on Embedded Software and Systems (ICESS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2001/01/k0124",
"title": "Analysis of the Clustering Properties of the Hilbert Space-Filling Curve",
"doi": null,
"abstractUrl": "/journal/tk/2001/01/k0124/13rRUwhHcJE",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1971/04/01671851",
"title": "Alternative Algorithm for Hilbert's Space-Filling Curve",
"doi": null,
"abstractUrl": "/journal/tc/1971/04/01671851/13rRUxBJhlA",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwMXnv3",
"title": "2010 Third International Joint Conference on Computational Science and Optimization",
"acronym": "cso",
"groupId": "1002829",
"volume": "1",
"displayVolume": "1",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAle6PU",
"doi": "10.1109/CSO.2010.221",
"title": "A New Method for Computing Contact Angle of High Speed Ball Bearing",
"normalizedTitle": "A New Method for Computing Contact Angle of High Speed Ball Bearing",
"abstract": "With the development of high-speed and precision of ball bearing, it was found that conventional model using the raceway control theory failed to give satisfactory model accuracy. A new calculating method of contact angle of ball bearing, that considers elastohydrodynamic lubrication of high speed ball bearing and the simultaneous spin motion between the balls and the inner and outer raceway, is proposed. Utilizing rigid ferrule theory and Hertzian contact theory, the influences of contact load on friction coefficient are analyzed. The formula of the attitude angle of rolling element based on the d'Alembert's principle is introduced into the dynamic model. A case of contact angles is simulated with two computing methods. The comparison results show that the proposed method has much better accuracy than the conventional model and are suitable for precision bearing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the development of high-speed and precision of ball bearing, it was found that conventional model using the raceway control theory failed to give satisfactory model accuracy. A new calculating method of contact angle of ball bearing, that considers elastohydrodynamic lubrication of high speed ball bearing and the simultaneous spin motion between the balls and the inner and outer raceway, is proposed. Utilizing rigid ferrule theory and Hertzian contact theory, the influences of contact load on friction coefficient are analyzed. The formula of the attitude angle of rolling element based on the d'Alembert's principle is introduced into the dynamic model. A case of contact angles is simulated with two computing methods. The comparison results show that the proposed method has much better accuracy than the conventional model and are suitable for precision bearing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the development of high-speed and precision of ball bearing, it was found that conventional model using the raceway control theory failed to give satisfactory model accuracy. A new calculating method of contact angle of ball bearing, that considers elastohydrodynamic lubrication of high speed ball bearing and the simultaneous spin motion between the balls and the inner and outer raceway, is proposed. Utilizing rigid ferrule theory and Hertzian contact theory, the influences of contact load on friction coefficient are analyzed. The formula of the attitude angle of rolling element based on the d'Alembert's principle is introduced into the dynamic model. A case of contact angles is simulated with two computing methods. The comparison results show that the proposed method has much better accuracy than the conventional model and are suitable for precision bearing.",
"fno": "05533030",
"keywords": [
"Ball Bearings",
"Elastodynamics",
"Friction",
"Hydrodynamics",
"Lubrication",
"Mechanical Contact",
"Shear Modulus",
"Computing Contact Angle",
"High Speed Ball Bearing",
"Raceway Control Theory",
"Elastohydrodynamic Lubrication",
"Rigid Ferrule Theory",
"Hertzian Contact Theory",
"Friction Coefficient",
"Precision Bearing",
"Ball Bearings",
"Aerodynamics",
"Friction",
"Educational Technology",
"Control Theory",
"Lubrication",
"Rolling Bearings",
"Equations",
"Jacobian Matrices",
"Optimization Methods",
"Ball Bearing",
"Computing Method",
"Contact Angle",
"Dynamics"
],
"authors": [
{
"affiliation": null,
"fullName": "Chunli Lei",
"givenName": "Chunli",
"surname": "Lei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhiyuan Rui",
"givenName": "Zhiyuan",
"surname": "Rui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jun Liu",
"givenName": "Jun",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ruicheng Feng",
"givenName": "Ruicheng",
"surname": "Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Juntian Zhao",
"givenName": "Juntian",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cso",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-05-01T00:00:00",
"pubType": "proceedings",
"pages": "331-334",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-6812-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05532953",
"articleId": "12OmNwtWfMJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05533172",
"articleId": "12OmNCxL9R3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2010/3962/1/3962a833",
"title": "Contact Stress and Deformation of Blade Bearing in Wind Turbine",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962a833/12OmNBpVPXw",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/1",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/2/4296c221",
"title": "Finite Element Analysis of the Contact Problem for a Wire Race Ball Bearing Used in a Rotating Platform",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296c221/12OmNqzu6V0",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/2",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/3/4296e672",
"title": "The Analysis of the Contact Stress of Hydraulic Excavator Slewing Bearing",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296e672/12OmNrkT7LC",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/3",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmp/2000/0562/0/05620385",
"title": "Gn-Blending with Rolling Ball Contact Curves",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2000/05620385/12OmNvA1hrc",
"parentPublication": {
"id": "proceedings/gmp/2000/0562/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031b320",
"title": "Multibody Dynamics Simulation of Balls Impact-Contact Mechanics in Ball Screw Mechanism",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031b320/12OmNyr8Yoz",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccie/2010/4026/1/4026a320",
"title": "Optimal Design of High Speed Angular Contact Ball Bearing Using a Multiobjective Evolution Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ccie/2010/4026a320/12OmNywfKBK",
"parentPublication": {
"id": "proceedings/ccie/2010/4026/1",
"title": "Computing, Control and Industrial Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsps/2009/3654/0/3654a292",
"title": "Study and Construction of an Apparatus that Automatically Monitors Vibration and Wears in Radial Ball Bearings which are Loaded in Radial Direction",
"doi": null,
"abstractUrl": "/proceedings-article/icsps/2009/3654a292/12OmNzRHOQg",
"parentPublication": {
"id": "proceedings/icsps/2009/3654/0",
"title": "2009 International Conference on Signal Processing Systems (ICSPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2017/3013/0/3013b263",
"title": "Study on Temperature Control Method of High Speed Ball Bearing with Oil-Air Lubrication",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013b263/12OmNzZWbN2",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wceea/2022/5952/0/595200a241",
"title": "Analysis of the relationship between contact deformation and contact force of bearing roller based on plastic deformation",
"doi": null,
"abstractUrl": "/proceedings-article/wceea/2022/595200a241/1J7WEw0KuJi",
"parentPublication": {
"id": "proceedings/wceea/2022/5952/0",
"title": "2022 International Conference on Wireless Communications, Electrical Engineering and Automation (WCEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2020/6698/0/669800a969",
"title": "Experiment Test and Numerical Simulation of Cable-Sliding Friction Aseismic Bearing with Buffer",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2020/669800a969/1kuHMdkSLEA",
"parentPublication": {
"id": "proceedings/icitbs/2020/6698/0",
"title": "2020 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzahbRb",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"acronym": "robot",
"groupId": "1000639",
"volume": "0",
"displayVolume": "0",
"year": "1991",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqJZgBi",
"doi": "10.1109/ROBOT.1991.131579",
"title": "Bandwidth performance of a direct drive manipulator under joint torque and endpoint force control",
"normalizedTitle": "Bandwidth performance of a direct drive manipulator under joint torque and endpoint force control",
"abstract": "The experimental bandwidth limitations for digitally implemented joint torque and endpoint force control of a one degree-of-freedom direct drive manipulator are discussed. It is shown that an open loop joint torque controller can be used with good results to track endpoint force trajectories, but is limited in bandwidth to the natural frequency of the rigid-body mode. Endpoint force control compensates for the rigid-body mode and is capable of increasing the bandwidth above the joint torque controller (within the limits of the actuator). Both results suggest that a higher natural frequency of the system is desirable. In contrast, it is shown that a lower natural frequency is desirable in the case of contact discontinuity due to the large impulses created during impact.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The experimental bandwidth limitations for digitally implemented joint torque and endpoint force control of a one degree-of-freedom direct drive manipulator are discussed. It is shown that an open loop joint torque controller can be used with good results to track endpoint force trajectories, but is limited in bandwidth to the natural frequency of the rigid-body mode. Endpoint force control compensates for the rigid-body mode and is capable of increasing the bandwidth above the joint torque controller (within the limits of the actuator). Both results suggest that a higher natural frequency of the system is desirable. In contrast, it is shown that a lower natural frequency is desirable in the case of contact discontinuity due to the large impulses created during impact.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The experimental bandwidth limitations for digitally implemented joint torque and endpoint force control of a one degree-of-freedom direct drive manipulator are discussed. It is shown that an open loop joint torque controller can be used with good results to track endpoint force trajectories, but is limited in bandwidth to the natural frequency of the rigid-body mode. Endpoint force control compensates for the rigid-body mode and is capable of increasing the bandwidth above the joint torque controller (within the limits of the actuator). Both results suggest that a higher natural frequency of the system is desirable. In contrast, it is shown that a lower natural frequency is desirable in the case of contact discontinuity due to the large impulses created during impact.",
"fno": "00131579",
"keywords": [
"Digital Control",
"Force Control",
"Robots",
"Torque Control",
"Joint Torque Control",
"Trajectories Tracking",
"Endpoint Force Control",
"Bandwidth Limitations",
"One Degree Of Freedom Direct Drive Manipulator",
"Rigid Body Mode",
"Higher Natural Frequency",
"Lower Natural Frequency",
"Contact Discontinuity",
"Bandwidth",
"Force Control",
"Force Feedback",
"Torque Control",
"Force Sensors",
"Frequency",
"Friction",
"Open Loop Systems",
"Robots",
"Aluminum"
],
"authors": [
{
"affiliation": "Hughes Aircraft Co., El Segundo, CA, USA",
"fullName": "H.C. Fowler",
"givenName": "H.C.",
"surname": "Fowler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "S.D. Eppinger",
"givenName": "S.D.",
"surname": "Eppinger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "robot",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1991-01-01T00:00:00",
"pubType": "proceedings",
"pages": "230,231,232,233,234,235,236,237",
"year": "1991",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00131578",
"articleId": "12OmNzcPA72",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00131580",
"articleId": "12OmNC2fGxS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icece/2010/4031/0/4031b336",
"title": "Neural-Network-Based Six-axis Force/Torque Robot Sensor Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031b336/12OmNBOll84",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1989/1938/0/00100030",
"title": "Integration of tactile force and joint torque information in a whole-arm manipulator",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100030/12OmNCwlacK",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1992/2665/0/00712212",
"title": "Misalignment Force Control in Two Manipulator Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1992/00712212/12OmNqzcvAY",
"parentPublication": {
"id": "proceedings/ssst/1992/2665/0",
"title": "The 24th Southeastern Symposium on System Theory and The 3rd Annual Symposium on Communications, Signal Processing Expert Systems, and ASIC VLSI Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00219983",
"title": "Experimental study on torque control using harmonic drive built-in torque sensors",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00219983/12OmNwJybO3",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012101",
"title": "Adaptive load-sharing force control for two-arm manipulators",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012101/12OmNxtONZw",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/2/71082144",
"title": "Large force-task planning for mobile and redundant robots",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082144/12OmNxveNGC",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131826",
"title": "Design of a four degree-of-freedom force-reflecting manipulandum with a specified force/torque workspace",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131826/12OmNxwENIo",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220167",
"title": "A tactile sensing method for employing force/torque information through insensitive probes",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220167/12OmNzdoMo4",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00219949",
"title": "Learning control for robot tasks under geometric endpoint constraints",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00219949/12OmNzxPTHB",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/03/tth2011030221",
"title": "Force, Torque, and Stiffness: Interactions in Perceptual Discrimination",
"doi": null,
"abstractUrl": "/journal/th/2011/03/tth2011030221/13rRUxC0SP2",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzw8jh3",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"acronym": "robot",
"groupId": "1000639",
"volume": "0",
"displayVolume": "0",
"year": "1988",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs4S8Jb",
"doi": "10.1109/ROBOT.1988.12052",
"title": "Kinematics and control of multifingered hands with rolling contact",
"normalizedTitle": "Kinematics and control of multifingered hands with rolling contact",
"abstract": "The kinematics of rolling contact for two surfaces of arbitrary shape rolling on each other is derived. Applying these kinematic equations to two planar multifingered hands manipulating some object of arbitrary shape, a scheme is presented for the control of these hands which is a generalization of the computed torque method of control of robot manipulators. In implementing the control, it is required that all applied forces lie within the friction cone of the object so that sliding does not occur. The theory is illustrated with graphic simulations of the control law applied to the system dynamics for two examples.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The kinematics of rolling contact for two surfaces of arbitrary shape rolling on each other is derived. Applying these kinematic equations to two planar multifingered hands manipulating some object of arbitrary shape, a scheme is presented for the control of these hands which is a generalization of the computed torque method of control of robot manipulators. In implementing the control, it is required that all applied forces lie within the friction cone of the object so that sliding does not occur. The theory is illustrated with graphic simulations of the control law applied to the system dynamics for two examples.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The kinematics of rolling contact for two surfaces of arbitrary shape rolling on each other is derived. Applying these kinematic equations to two planar multifingered hands manipulating some object of arbitrary shape, a scheme is presented for the control of these hands which is a generalization of the computed torque method of control of robot manipulators. In implementing the control, it is required that all applied forces lie within the friction cone of the object so that sliding does not occur. The theory is illustrated with graphic simulations of the control law applied to the system dynamics for two examples.",
"fno": "00012052",
"keywords": [
"Kinematics",
"Robots",
"Torque Control",
"Robots",
"Multifingered Hands",
"Rolling Contact",
"Kinematics",
"Torque Method",
"Friction Cone",
"System Dynamics",
"Kinematics",
"Shape Control",
"Torque Control",
"Equations",
"Robot Control",
"Manipulator Dynamics",
"Force Control",
"Friction",
"Graphics",
"Computational Modeling"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng. & Comput. Sci., California Univ., Berkeley, CA, USA",
"fullName": "A. Cole",
"givenName": "A.",
"surname": "Cole",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng. & Comput. Sci., California Univ., Berkeley, CA, USA",
"fullName": "J. Hauser",
"givenName": "J.",
"surname": "Hauser",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng. & Comput. Sci., California Univ., Berkeley, CA, USA",
"fullName": "S. Sastry",
"givenName": "S.",
"surname": "Sastry",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "robot",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1988-01-01T00:00:00",
"pubType": "proceedings",
"pages": "228,229,230,231,232,233",
"year": "1988",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00012051",
"articleId": "12OmNxTVU22",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00012053",
"articleId": "12OmNAlvI27",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iros/1995/7108/2/71082194",
"title": "Rolling with deformable fingertips",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082194/12OmNAZx8Nx",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012053",
"title": "Inverse kinematics for a multifingered hand",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012053/12OmNAlvI27",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012126",
"title": "Cartesian stiffness control of the JPL/Stanford/Salisbury hand",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012126/12OmNqEjhVZ",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131656",
"title": "Planning grasp strategies for multifingered robot hands",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131656/12OmNqI04TK",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012055",
"title": "Evaluation and determination of grasping forces for multifingered hands",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012055/12OmNqzu6Kx",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1989/1938/0/00100054",
"title": "Control experiments in planar manipulation and grasping",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100054/12OmNvA1hew",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1989/1938/0/00100003",
"title": "Tactile shape sensing via single- and multifingered hands",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100003/12OmNx5piWi",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131659",
"title": "Fingertip force planning for multifingered robot hands",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131659/12OmNyoSbid",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012078",
"title": "On grasping and coordinated manipulation by a multifingered robot hand",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012078/12OmNyywxC5",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/2/71082186",
"title": "A successful multifingered hand design-the case of the raccoon",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082186/12OmNzlD99r",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKxW",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs5rkY5",
"doi": "10.1109/HAPTIC.2003.1191330",
"title": "Haptic Feedback Using Local Models of Interaction",
"normalizedTitle": "Haptic Feedback Using Local Models of Interaction",
"abstract": "A local model of interaction proposed for haptic rendering of rigid body motion is extended to the rigid body manipulation of articulated structures. In the proposed local model, the motion constraints imposed on the user by the articulated structure are rendered through the local dynamics, while those imposed by other virtual objects are rendered through impulsive, penalty, and friction contact forces. The proposed local model is used to interface a planar haptic device to a virtual world comprised of both rigid objects and articulated structures moving within an enclosure of rigid walls.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A local model of interaction proposed for haptic rendering of rigid body motion is extended to the rigid body manipulation of articulated structures. In the proposed local model, the motion constraints imposed on the user by the articulated structure are rendered through the local dynamics, while those imposed by other virtual objects are rendered through impulsive, penalty, and friction contact forces. The proposed local model is used to interface a planar haptic device to a virtual world comprised of both rigid objects and articulated structures moving within an enclosure of rigid walls.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A local model of interaction proposed for haptic rendering of rigid body motion is extended to the rigid body manipulation of articulated structures. In the proposed local model, the motion constraints imposed on the user by the articulated structure are rendered through the local dynamics, while those imposed by other virtual objects are rendered through impulsive, penalty, and friction contact forces. The proposed local model is used to interface a planar haptic device to a virtual world comprised of both rigid objects and articulated structures moving within an enclosure of rigid walls.",
"fno": "18900416",
"keywords": [],
"authors": [
{
"affiliation": "University of British Columbia",
"fullName": "Daniela Constantinescu",
"givenName": "Daniela",
"surname": "Constantinescu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia",
"fullName": "Septimiu E. Salcudean",
"givenName": "Septimiu E.",
"surname": "Salcudean",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia",
"fullName": "Elizabeth A. Croft",
"givenName": "Elizabeth A.",
"surname": "Croft",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-03-01T00:00:00",
"pubType": "proceedings",
"pages": "416",
"year": "2003",
"issn": null,
"isbn": "0-7695-1890-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "18900410",
"articleId": "12OmNqFJhGh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "18900422",
"articleId": "12OmNC8dgeO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icetet/2009/3884/0/pid980664",
"title": "Effect of Haptic Force Feedback on Upper Limb",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2009/pid980664/12OmNA0dMRn",
"parentPublication": {
"id": "proceedings/icetet/2009/3884/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550197",
"title": "A novel 3D carousel based on pseudo-haptic feedback and gestural interaction for virtual showcasing",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550197/12OmNC2fGxW",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2004/2112/0/21120272",
"title": "A Multi-Threaded Approach for Deformable/Rigid Contacts with Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2004/21120272/12OmNrIJqsc",
"parentPublication": {
"id": "proceedings/haptics/2004/2112/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890066",
"title": "Haptic Volume Interaction with Anatomic Models at Sub-Voxel Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890066/12OmNrJRP6R",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2003/1890/0/18900109",
"title": "Dynamic Local Models for Stable Multi-Contact Haptic Interaction with Deformable Objects",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2003/18900109/12OmNzBOhQD",
"parentPublication": {
"id": "proceedings/haptics/2003/1890/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2000/0478/0/04780031",
"title": "Dynamic Deformable Models for Enhanced Haptic Rendering in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780031/12OmNzBwGmI",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2011/4467/0/4467a217",
"title": "Function-Based Haptic Interaction in Cyberworlds",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2011/4467a217/12OmNzVGcFh",
"parentPublication": {
"id": "proceedings/cw/2011/4467/0",
"title": "2011 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2004/2112/0/21120018",
"title": "Accelerated Haptic Rendering of Polygonal Models through Local Descent",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2004/21120018/12OmNzxgHFx",
"parentPublication": {
"id": "proceedings/haptics/2004/2112/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/03/mcg2007030090",
"title": "A Precomputed Approach for Real-Time Haptic Interaction with Fluids",
"doi": null,
"abstractUrl": "/magazine/cg/2007/03/mcg2007030090/13rRUxjyX6p",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798195",
"title": "fARFEEL: Providing Haptic Sensation of Touched Objects Using Visuo-Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798195/1cJ18nc5sys",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwl8GHU",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNviZlN9",
"doi": "10.1109/3DUI.2013.6550206",
"title": "The god-finger method for improving 3D interaction with virtual objects through simulation of contact area",
"normalizedTitle": "The god-finger method for improving 3D interaction with virtual objects through simulation of contact area",
"abstract": "In physically-based virtual environments, interaction with objects generally happens through contact points that barely represent the area of contact between the user's hand and the virtual object. This representation of contacts contrasts with real life situations where our finger pads have the ability to deform slightly to match the shape of a touched object. In this paper, we propose a method called god-finger to simulate a contact area from a single contact point determined by collision detection, and usable in a rigid body physics engine. The method uses the geometry of the object and the force applied to it to determine additional contact points that will emulate the presence of a contact area between the user's proxy and the virtual object. It could improve the manipulation of objects by constraining the rotation of touched objects in a similar manner to actual finger pads. An implementation in a physics engine shows that the method could make for more realistic behaviour when manipulating objects while keeping high simulation rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In physically-based virtual environments, interaction with objects generally happens through contact points that barely represent the area of contact between the user's hand and the virtual object. This representation of contacts contrasts with real life situations where our finger pads have the ability to deform slightly to match the shape of a touched object. In this paper, we propose a method called god-finger to simulate a contact area from a single contact point determined by collision detection, and usable in a rigid body physics engine. The method uses the geometry of the object and the force applied to it to determine additional contact points that will emulate the presence of a contact area between the user's proxy and the virtual object. It could improve the manipulation of objects by constraining the rotation of touched objects in a similar manner to actual finger pads. An implementation in a physics engine shows that the method could make for more realistic behaviour when manipulating objects while keeping high simulation rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In physically-based virtual environments, interaction with objects generally happens through contact points that barely represent the area of contact between the user's hand and the virtual object. This representation of contacts contrasts with real life situations where our finger pads have the ability to deform slightly to match the shape of a touched object. In this paper, we propose a method called god-finger to simulate a contact area from a single contact point determined by collision detection, and usable in a rigid body physics engine. The method uses the geometry of the object and the force applied to it to determine additional contact points that will emulate the presence of a contact area between the user's proxy and the virtual object. It could improve the manipulation of objects by constraining the rotation of touched objects in a similar manner to actual finger pads. An implementation in a physics engine shows that the method could make for more realistic behaviour when manipulating objects while keeping high simulation rates.",
"fno": "06550206",
"keywords": [
"Vectors",
"Haptic Interfaces",
"Force",
"Geometry",
"Engines",
"Friction",
"Computational Modeling"
],
"authors": [
{
"affiliation": "INSA/Inria Rennes, Rennes, France",
"fullName": "Anthony Talvas",
"givenName": "Anthony",
"surname": "Talvas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INSA/Inria Rennes, Rennes, France",
"fullName": "Maud Marchal",
"givenName": "Maud",
"surname": "Marchal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria Rennes, Rennes, France",
"fullName": "Anatole Lecuyer",
"givenName": "Anatole",
"surname": "Lecuyer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-03-01T00:00:00",
"pubType": "proceedings",
"pages": "111-114",
"year": "2013",
"issn": null,
"isbn": "978-1-4673-6097-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06550205",
"articleId": "12OmNApu5wU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06550207",
"articleId": "12OmNzcPAx3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2006/0226/0/02260032",
"title": "A Limit-Curve Based Soft Finger god-object Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260032/12OmNC2OSKc",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1989/1938/0/00100004",
"title": "Grasping polyhedral objects with slip",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100004/12OmNvkpliD",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptic/2006/0226/0/01627064",
"title": "A Limit-Curve Based Soft Finger god-object Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627064/12OmNxFaLgx",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2012/1204/0/06184183",
"title": "A generalized God-object method for plausible finger-based interactions in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184183/12OmNyPQ4Qp",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386331",
"title": "A common 3-finger grasp search algorithm for a set of planar objects",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386331/12OmNyv7m3Y",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00219920",
"title": "Finding antipodal point grasps on irregularly shaped objects",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00219920/12OmNzZEAI1",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/04/07968382",
"title": "Characterizing and Imaging Gross and Real Finger Contacts under Dynamic Loading",
"doi": null,
"abstractUrl": "/journal/th/2017/04/07968382/13rRUwInvfj",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06767148",
"title": "Implicit Multibody Penalty-Based Distributed Contact",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06767148/13rRUwInvyz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/02/07858777",
"title": "Friction Reduction through Ultrasonic Vibration Part 1: Modelling Intermittent Contact",
"doi": null,
"abstractUrl": "/journal/th/2017/02/07858777/13rRUxZRboa",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07358132",
"title": "On Frictional Forces between the Finger and a Textured Surface during Active Touch",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07358132/13rRUxZzAhO",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKxW",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxEBz4D",
"doi": "10.1109/HAPTIC.2003.1191304",
"title": "Real-Time Rigid Body Simulation Based on Volumetric Penalty Method",
"normalizedTitle": "Real-Time Rigid Body Simulation Based on Volumetric Penalty Method",
"abstract": "This paper proposes a new method for real-time rigid body simulations based on a volume rich penalty method. The penalty method, which employs spring-damper model, is a simple and useful method for real-time simulation of multi-bodies. However, simple penalty method cannot handle face-face contact, because simple penalty method cannot find application point of reflection force. We suppose distributed small spring-damper model to solve the problem. We analyze intersecting part of bodies and integrate forces and torques from distributed spring-damper models. We implement the simulator and compare our simulator with simple penalty method. It showed that our simulator solve the face-face contact problem. In addition, we attach haptic interface to the simulator for interaction. It shows that we were able to interact with virtual world by haptic interfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a new method for real-time rigid body simulations based on a volume rich penalty method. The penalty method, which employs spring-damper model, is a simple and useful method for real-time simulation of multi-bodies. However, simple penalty method cannot handle face-face contact, because simple penalty method cannot find application point of reflection force. We suppose distributed small spring-damper model to solve the problem. We analyze intersecting part of bodies and integrate forces and torques from distributed spring-damper models. We implement the simulator and compare our simulator with simple penalty method. It showed that our simulator solve the face-face contact problem. In addition, we attach haptic interface to the simulator for interaction. It shows that we were able to interact with virtual world by haptic interfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a new method for real-time rigid body simulations based on a volume rich penalty method. The penalty method, which employs spring-damper model, is a simple and useful method for real-time simulation of multi-bodies. However, simple penalty method cannot handle face-face contact, because simple penalty method cannot find application point of reflection force. We suppose distributed small spring-damper model to solve the problem. We analyze intersecting part of bodies and integrate forces and torques from distributed spring-damper models. We implement the simulator and compare our simulator with simple penalty method. It showed that our simulator solve the face-face contact problem. In addition, we attach haptic interface to the simulator for interaction. It shows that we were able to interact with virtual world by haptic interfaces.",
"fno": "18900326",
"keywords": [],
"authors": [
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Shoichi Hasegawa",
"givenName": "Shoichi",
"surname": "Hasegawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Nobuaki Fujii",
"givenName": "Nobuaki",
"surname": "Fujii",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Yasuharu Koike",
"givenName": "Yasuharu",
"surname": "Koike",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Makoto Sato",
"givenName": "Makoto",
"surname": "Sato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-03-01T00:00:00",
"pubType": "proceedings",
"pages": "326",
"year": "2003",
"issn": null,
"isbn": "0-7695-1890-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "18900318",
"articleId": "12OmNxZ2Giw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01191337",
"articleId": "1h0KVe8pPwY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2013/5001/0/06655784",
"title": "Cloth Simulation Using Triangular Mesh: A Study of Mesh Adaptivity",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2013/06655784/12OmNA1VnsC",
"parentPublication": {
"id": "proceedings/svr/2013/5001/0",
"title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cool-chips/2014/3810/0/06842947",
"title": "Parallel design of control systems utilizing dead time for embedded multicore processors",
"doi": null,
"abstractUrl": "/proceedings-article/cool-chips/2014/06842947/12OmNAHEpAN",
"parentPublication": {
"id": "proceedings/cool-chips/2014/3810/0",
"title": "2014 IEEE COOL Chips XVII (COOL Chips)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicsyn/2010/4158/0/4158a017",
"title": "AntNet with Reward-Penalty Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cicsyn/2010/4158a017/12OmNCgrD9N",
"parentPublication": {
"id": "proceedings/cicsyn/2010/4158/0",
"title": "Computational Intelligence, Communication Systems and Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2001/7237/0/00982388",
"title": "Merging deformable and rigid body mechanics simulation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2001/00982388/12OmNqIhFMw",
"parentPublication": {
"id": "proceedings/ca/2001/7237/0",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3pgcic/2011/4531/0/4531a402",
"title": "Modeling and Simulation of a Vehicle Suspension with Variable Damping and Elastic Properties versus the Excitation Frequency",
"doi": null,
"abstractUrl": "/proceedings-article/3pgcic/2011/4531a402/12OmNvA1hpI",
"parentPublication": {
"id": "proceedings/3pgcic/2011/4531/0",
"title": "P2P, Parallel, Grid, Cloud, and Internet Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ica/2016/3931/0/3931a116",
"title": "Standing Balance Switching Control Using the Stability Status of Humanoid Robot",
"doi": null,
"abstractUrl": "/proceedings-article/ica/2016/3931a116/12OmNzvhvxP",
"parentPublication": {
"id": "proceedings/ica/2016/3931/0",
"title": "2016 IEEE International Conference on Agents (ICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/01/ttg2008010231",
"title": "A Fast and Stable Penalty Method for Rigid Body Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2008/01/ttg2008010231/13rRUILLkvj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/04/ttg2008040783",
"title": "Rigid Body Cable for Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2008/04/ttg2008040783/13rRUwwaKt0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2009/01/tth2009010002",
"title": "Adaptive Control for Improved Transparency in Haptic Simulations",
"doi": null,
"abstractUrl": "/journal/th/2009/01/tth2009010002/13rRUyoPSPe",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150771",
"title": "Visual 3D Reconstruction and Dynamic Simulation of Fruit Trees for Robotic Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150771/1lPH4XuKzIY",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzlUKD1",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"acronym": "case",
"groupId": "1001095",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzWfp3m",
"doi": "10.1109/CoASE.2012.6386423",
"title": "Manipulation with vibratory velocity fields on a tilted plate",
"normalizedTitle": "Manipulation with vibratory velocity fields on a tilted plate",
"abstract": "We examine the dynamics of point parts in frictional contact with a periodically vibrating, flat, rigid plate that is nominally tilted with respect to horizontal. If the friction law satisfies the maximum power inequality, then part dynamics on the tilted plate are equivalent to part dynamics on a horizontal plate with a different friction law that also satisfies the maximum power inequality. For small angles of tilt, this equivalence means that every periodic plate motion induces a position-dependent velocity field through which parts slide. Assuming Coulomb friction, some of the fields obtainable with a tilted plate are impossible to generate with a horizontal plate; other fields that require complicated motions on a horizontal plate can be generated with simpler motions on a tilted plate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We examine the dynamics of point parts in frictional contact with a periodically vibrating, flat, rigid plate that is nominally tilted with respect to horizontal. If the friction law satisfies the maximum power inequality, then part dynamics on the tilted plate are equivalent to part dynamics on a horizontal plate with a different friction law that also satisfies the maximum power inequality. For small angles of tilt, this equivalence means that every periodic plate motion induces a position-dependent velocity field through which parts slide. Assuming Coulomb friction, some of the fields obtainable with a tilted plate are impossible to generate with a horizontal plate; other fields that require complicated motions on a horizontal plate can be generated with simpler motions on a tilted plate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We examine the dynamics of point parts in frictional contact with a periodically vibrating, flat, rigid plate that is nominally tilted with respect to horizontal. If the friction law satisfies the maximum power inequality, then part dynamics on the tilted plate are equivalent to part dynamics on a horizontal plate with a different friction law that also satisfies the maximum power inequality. For small angles of tilt, this equivalence means that every periodic plate motion induces a position-dependent velocity field through which parts slide. Assuming Coulomb friction, some of the fields obtainable with a tilted plate are impossible to generate with a horizontal plate; other fields that require complicated motions on a horizontal plate can be generated with simpler motions on a tilted plate.",
"fno": "06386423",
"keywords": [
"Friction",
"Manipulator Dynamics",
"Plates Structures",
"Vibrations",
"Vibratory Velocity Fields",
"Tilted Plate",
"Point Parts Dynamics",
"Frictional Contact",
"Periodically Vibrating Flat Rigid Plate",
"Friction Law",
"Maximum Power Inequality",
"Coulomb Friction",
"Horizontal Plate",
"Friction",
"Limit Cycles",
"Force",
"Acceleration",
"Dynamics",
"Vectors",
"Polynomials"
],
"authors": [
{
"affiliation": "Department of Mechanical Engineering, Northwestern University, Evanston, IL 60208 USA",
"fullName": "Thomas H. Vose",
"givenName": "Thomas H.",
"surname": "Vose",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical Engineering, Northwestern University, Evanston, IL 60208 USA",
"fullName": "Paul Umbanhowar",
"givenName": "Paul",
"surname": "Umbanhowar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical Engineering, Northwestern University, Evanston, IL 60208 USA",
"fullName": "Kevin M. Lynch",
"givenName": "Kevin M.",
"surname": "Lynch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "case",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-08-01T00:00:00",
"pubType": "proceedings",
"pages": "942-949",
"year": "2012",
"issn": "2161-8070",
"isbn": "978-1-4673-0430-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06386422",
"articleId": "12OmNwErpRx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06386424",
"articleId": "12OmNwpGgMV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccrd/2010/4043/0/4043a567",
"title": "Magnetic and Thermal Diffusion Effects on a Three-Dimensional Free Convective Flow Past a Uniformly Moving Porous Vertical Plate",
"doi": null,
"abstractUrl": "/proceedings-article/iccrd/2010/4043a567/12OmNBKW9K1",
"parentPublication": {
"id": "proceedings/iccrd/2010/4043/0",
"title": "Computer Research and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2015/8146/0/07340528",
"title": "Fuzzy modeling of complex, multi-source, dynamic friction",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2015/07340528/12OmNCbCrNv",
"parentPublication": {
"id": "proceedings/icat/2015/8146/0",
"title": "2015 XXV International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1990/2038/0/00138126",
"title": "A scheme for estimating joint friction using a model-based controller",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1990/00138126/12OmNrHjqJ5",
"parentPublication": {
"id": "proceedings/ssst/1990/2038/0",
"title": "Proceedings The Twenty-Second Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2010/7779/0/05575533",
"title": "Research of Improving the Accuracy of License Plate Character Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05575533/12OmNvUsop0",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candar/2013/2796/0/06726958",
"title": "Modeling of Stick-Slip Dynamic Behavior by Cellular Automata",
"doi": null,
"abstractUrl": "/proceedings-article/candar/2013/06726958/12OmNynsbzg",
"parentPublication": {
"id": "proceedings/candar/2013/2796/0",
"title": "2013 First International Symposium on Computing and Networking - Across Practical Development and Theoretical Research (CANDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/3/3962e962",
"title": "The Research for Methods of Mixing Processes Affect the Performance of Friction Plate",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962e962/12OmNzahc2x",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/3",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/03/07556272",
"title": "Vibrotactile Compliance Feedback for Tangential Force Interaction",
"doi": null,
"abstractUrl": "/journal/th/2017/03/07556272/13rRUwcS1D9",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/02/07858777",
"title": "Friction Reduction through Ultrasonic Vibration Part 1: Modelling Intermittent Contact",
"doi": null,
"abstractUrl": "/journal/th/2017/02/07858777/13rRUxZRboa",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmiae/2022/7396/0/739600a033",
"title": "Research on Forming Optimization of Pneumatic Support Rod Fixing Plate of Car Tailgate Based on Orthogonal Test",
"doi": null,
"abstractUrl": "/proceedings-article/icmiae/2022/739600a033/1JgrO26k3qE",
"parentPublication": {
"id": "proceedings/icmiae/2022/7396/0",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2021/3596/0/359600a001",
"title": "Design and research of the clutch device in knee joint exoskeleton drive",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2021/359600a001/1tMPP68elpe",
"parentPublication": {
"id": "proceedings/icedme/2021/3596/0",
"title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAsTgX5",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxuFBnH",
"doi": "10.1109/ICCV.2009.5459378",
"title": "Superresolution texture maps for multiview reconstruction",
"normalizedTitle": "Superresolution texture maps for multiview reconstruction",
"abstract": "We study the scenario of a multiview setting, where several calibrated views of a textured object with known surface geometry are available. The objective is to estimate a diffuse texture map as precisely as possible. A superresolution image formation model based on the camera properties leads to a total variation energy for the desired texture map, which can be recovered as the minimizer of the functional by solving the Euler-Lagrange equation on the surface. The PDE is transformed to planar texture space via an automatically created conformal atlas, where it can be solved using total variation deblurring. The proposed approach allows to recover a high-resolution, high-quality texture map even from lower-resolution photographs, which is of interest for a variety of image-based modeling applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We study the scenario of a multiview setting, where several calibrated views of a textured object with known surface geometry are available. The objective is to estimate a diffuse texture map as precisely as possible. A superresolution image formation model based on the camera properties leads to a total variation energy for the desired texture map, which can be recovered as the minimizer of the functional by solving the Euler-Lagrange equation on the surface. The PDE is transformed to planar texture space via an automatically created conformal atlas, where it can be solved using total variation deblurring. The proposed approach allows to recover a high-resolution, high-quality texture map even from lower-resolution photographs, which is of interest for a variety of image-based modeling applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We study the scenario of a multiview setting, where several calibrated views of a textured object with known surface geometry are available. The objective is to estimate a diffuse texture map as precisely as possible. A superresolution image formation model based on the camera properties leads to a total variation energy for the desired texture map, which can be recovered as the minimizer of the functional by solving the Euler-Lagrange equation on the surface. The PDE is transformed to planar texture space via an automatically created conformal atlas, where it can be solved using total variation deblurring. The proposed approach allows to recover a high-resolution, high-quality texture map even from lower-resolution photographs, which is of interest for a variety of image-based modeling applications.",
"fno": "05459378",
"keywords": [
"Geometry",
"Image Resolution",
"Image Restoration",
"Image Texture",
"Partial Differential Equations",
"Superresolution Texture Maps",
"Multiview Reconstruction",
"Surface Geometry",
"Diffuse Texture Map",
"Superresolution Image Formation Model",
"Camera Property",
"Euler Lagrange Equation",
"PDE",
"Planar Texture Space",
"Conformal Atlas",
"Total Variation Deblurring",
"Image Based Modeling Application",
"Cameras",
"Image Reconstruction",
"Surface Texture",
"Surface Reconstruction",
"Image Resolution",
"Surface Fitting",
"Solid Modeling",
"Rendering Computer Graphics",
"Geometry",
"Spatial Resolution"
],
"authors": [
{
"affiliation": "Computer Science Department, University of Bonn, Germany",
"fullName": "Bastian Goldluecke",
"givenName": "Bastian",
"surname": "Goldluecke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science Department, University of Bonn, Germany",
"fullName": "Daniel Cremers",
"givenName": "Daniel",
"surname": "Cremers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1677-1684",
"year": "2009",
"issn": "1550-5499",
"isbn": "978-1-4244-4420-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05459376",
"articleId": "12OmNxj23fW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05459379",
"articleId": "12OmNyo1o71",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391a513",
"title": "Variational Depth Superresolution Using Example-Based Edge Representations",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a513/12OmNAoDibR",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/2/3119b145",
"title": "Super Resolution of 3D Surface Texture Based on Eigen Images",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119b145/12OmNBC8AAT",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/3",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscsct/2008/3498/2/3498b230",
"title": "3D Surface Texture Synthesis Based on Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498b230/12OmNBRsVxV",
"parentPublication": {
"id": "proceedings/iscsct/2008/3498/1",
"title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410406",
"title": "Multi-view 3D model reconstruction: exploitation of color homogeneity in voxel mask",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410406/12OmNBZHih5",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420876",
"title": "Recovering object surfaces from viewed changes in surface texture patterns",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420876/12OmNvT2p2H",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761072",
"title": "A 2D model for face superresolution",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761072/12OmNwD1pQX",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2007/1834/0/04458156",
"title": "Image Superresolution under Spatially Structured Noise",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2007/04458156/12OmNwI8ce9",
"parentPublication": {
"id": "proceedings/isspit/2007/1834/0",
"title": "2007 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/3/01326590",
"title": "Superresolution reconstruction of hyperspectral images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326590/12OmNwc3wvk",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284896",
"title": "Hole Filling on Three-Dimensional Surface Texture",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284896/12OmNy4IF6j",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a433",
"title": "Separating Texture and Illumination for Single-Shot Structured Light Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a433/12OmNyq0zJQ",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy5hRda",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"acronym": "3dpvt",
"groupId": "1000000",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyY4rn6",
"doi": "10.1109/3DPVT.2006.72",
"title": "Hierarchical PCA Decomposition of Point Clouds",
"normalizedTitle": "Hierarchical PCA Decomposition of Point Clouds",
"abstract": "We present a hierarchical analysis technique for point clouds, based on Principal Component Analysis (PCA), a well known multivariate statistical method. The crux of the algorithm is a top-down planarity assessment of the underlying point data, after which individual planar patches are merged using a tree clustering technique. We will demonstrate how the results of this analysis are used as a preprocessing step for computer aided inspection of sheet metal folding, surface reconstruction and a hybrid point-polygon rendering algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a hierarchical analysis technique for point clouds, based on Principal Component Analysis (PCA), a well known multivariate statistical method. The crux of the algorithm is a top-down planarity assessment of the underlying point data, after which individual planar patches are merged using a tree clustering technique. We will demonstrate how the results of this analysis are used as a preprocessing step for computer aided inspection of sheet metal folding, surface reconstruction and a hybrid point-polygon rendering algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a hierarchical analysis technique for point clouds, based on Principal Component Analysis (PCA), a well known multivariate statistical method. The crux of the algorithm is a top-down planarity assessment of the underlying point data, after which individual planar patches are merged using a tree clustering technique. We will demonstrate how the results of this analysis are used as a preprocessing step for computer aided inspection of sheet metal folding, surface reconstruction and a hybrid point-polygon rendering algorithm.",
"fno": "282500591",
"keywords": [
"PCA",
"Point Cloud Analysis",
"Minimum Spanning Tree",
"Segmentation",
"Hierarchical Methods",
"Surface Reconstruction",
"CAD Analysis",
"Hybrid Rendering"
],
"authors": [
{
"affiliation": "Universitaire Campus, Belgium",
"fullName": "Jan Fransens",
"givenName": "Jan",
"surname": "Fransens",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universitaire Campus, Belgium",
"fullName": "Frank Van Reeth",
"givenName": "Frank",
"surname": "Van Reeth",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dpvt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-06-01T00:00:00",
"pubType": "proceedings",
"pages": "591-598",
"year": "2006",
"issn": null,
"isbn": "0-7695-2825-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "282500583",
"articleId": "12OmNwLOYRr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "282500599",
"articleId": "12OmNAle6j2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wism/2009/3817/0/3817a237",
"title": "Comparison of Prim and Kruskal on Shanghai and Shenzhen 300 Index Hierarchical Structure Tree",
"doi": null,
"abstractUrl": "/proceedings-article/wism/2009/3817a237/12OmNBA9oxD",
"parentPublication": {
"id": "proceedings/wism/2009/3817/0",
"title": "Web Information Systems and Mining, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2013/4963/0/4963a409",
"title": "PFP-PCA: Parallel Fixed Point PCA Face Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2013/4963a409/12OmNBigFyh",
"parentPublication": {
"id": "proceedings/isms/2013/4963/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifcsta/2009/3930/1/3930a287",
"title": "Crowds' Classification Using Hierarchical Cluster, Rough Sets, Principal Component Analysis and Its Combination",
"doi": null,
"abstractUrl": "/proceedings-article/ifcsta/2009/3930a287/12OmNC8MsB3",
"parentPublication": {
"id": "proceedings/ifcsta/2009/3930/3",
"title": "Computer Science-Technology and Applications, International Forum on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d480",
"title": "Scale Matching of 3D Point Clouds by Finding Keyscales with Spin Images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d480/12OmNqBbI0C",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a169",
"title": "A Recursive Online Kernel PCA Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a169/12OmNwMobdc",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2011/4596/0/4596a110",
"title": "A Simple Hierarchical Clustering Method for Improving Flame Pixel Classification",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2011/4596a110/12OmNxVDuJT",
"parentPublication": {
"id": "proceedings/ictai/2011/4596/0",
"title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2008/3304/4/3304d152",
"title": "Hierarchical Speaker Verification Based on PCA and Kernel Fisher Discriminant",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2008/3304d152/12OmNyqRnqp",
"parentPublication": {
"id": "proceedings/icnc/2008/3304/4",
"title": "2008 Fourth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1975/12/01672754",
"title": "An Algorithm for Determining the Topological Dimensionality of Point Clusters",
"doi": null,
"abstractUrl": "/journal/tc/1975/12/01672754/13rRUxASuu9",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/07/ttp2013071649",
"title": "Hierarchical Object Parsing from Structured Noisy Point Clouds",
"doi": null,
"abstractUrl": "/journal/tp/2013/07/ttp2013071649/13rRUyfKIEp",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b936",
"title": "Point Cloud Completion by Skip-Attention Network With Hierarchical Folding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b936/1m3ocHBVL8I",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqJ8t9X",
"title": "2010 IEEE 23rd International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cbms",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyqzLXT",
"doi": "10.1109/CBMS.2010.6042622",
"title": "Using interactive and multi-touch technology to support decision making in multidisciplinary team meetings",
"normalizedTitle": "Using interactive and multi-touch technology to support decision making in multidisciplinary team meetings",
"abstract": "In multidisciplinary team (MDT) meetings for colorectal and liver cancer, each patient case is reviewed while evidence, including digital image scans such as MRI and PET/CT, is presented by clinicians. Currently these images are projected onto a wall, limiting clinician interaction. While multi-touch and interactive tabletops have been used to enhance collaboration in various scenarios, some aspects such as image quality and touch resolution need to be evaluated in this particular scenario. In this paper we present the results of work conducted to test the suitability of using a DiamondTouch tabletop, a multi-touch and multi-user surface, in MDT meetings to enhance clinician interaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In multidisciplinary team (MDT) meetings for colorectal and liver cancer, each patient case is reviewed while evidence, including digital image scans such as MRI and PET/CT, is presented by clinicians. Currently these images are projected onto a wall, limiting clinician interaction. While multi-touch and interactive tabletops have been used to enhance collaboration in various scenarios, some aspects such as image quality and touch resolution need to be evaluated in this particular scenario. In this paper we present the results of work conducted to test the suitability of using a DiamondTouch tabletop, a multi-touch and multi-user surface, in MDT meetings to enhance clinician interaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In multidisciplinary team (MDT) meetings for colorectal and liver cancer, each patient case is reviewed while evidence, including digital image scans such as MRI and PET/CT, is presented by clinicians. Currently these images are projected onto a wall, limiting clinician interaction. While multi-touch and interactive tabletops have been used to enhance collaboration in various scenarios, some aspects such as image quality and touch resolution need to be evaluated in this particular scenario. In this paper we present the results of work conducted to test the suitability of using a DiamondTouch tabletop, a multi-touch and multi-user surface, in MDT meetings to enhance clinician interaction.",
"fno": "06042622",
"keywords": [
"Touch Resolution",
"Interactive Tabletops",
"Multitouch Tabletops",
"Decision Making Support",
"Multidisciplinary Team Meetings",
"Colorectal Cancer",
"Liver Cancer",
"Digital Image Scans",
"MRI",
"PET",
"CT",
"Clinician Interaction",
"Diamond Touch Tabletop",
"Multiuser Surface",
"Image Quality"
],
"authors": [
{
"affiliation": "Oxford e-Res. Centre, Univ. of Oxford, Oxford, UK",
"fullName": "M. S. Avila-Garcia",
"givenName": "M. S.",
"surname": "Avila-Garcia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Oxford e-Res. Centre, Univ. of Oxford, Oxford, UK",
"fullName": "A. E. Trefethen",
"givenName": "A. E.",
"surname": "Trefethen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Eng. Sci., Univ. of Oxford, Oxford, UK",
"fullName": "M. Brady",
"givenName": "M.",
"surname": "Brady",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nuffield Dept. of Surg., Univ. of Oxford, Oxford, UK",
"fullName": "F. Gleeson",
"givenName": "F.",
"surname": "Gleeson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cbms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "98-103",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-9167-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06042619",
"articleId": "12OmNqGRGmP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06042623",
"articleId": "12OmNwcl7F0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2011/1189/0/05999025",
"title": "On record keeping at multidisciplinary team meetings",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2011/05999025/12OmNqzu6JR",
"parentPublication": {
"id": "proceedings/cbms/2011/1189/0",
"title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2010/6261/0/05673353",
"title": "SpelLit: Development of a multi-touch application to foster literacy skills at elementary schools",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2010/05673353/12OmNxQOjDx",
"parentPublication": {
"id": "proceedings/fie/2010/6261/0",
"title": "2010 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2013/1053/0/06627815",
"title": "Developing a framework for evaluation of technology use at multidisciplinary meetings in healthcare",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2013/06627815/12OmNyfdOKt",
"parentPublication": {
"id": "proceedings/cbms/2013/1053/0",
"title": "2013 IEEE 26th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2010/9167/0/06042624",
"title": "Assessing support requirements for multidisciplinary team meetings",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2010/06042624/12OmNz6iO51",
"parentPublication": {
"id": "proceedings/cbms/2010/9167/0",
"title": "2010 IEEE 23rd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2008/3165/0/3165a482",
"title": "Supporting Enhanced Collaboration in Distributed Multidisciplinary Care Team Meetings",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2008/3165a482/12OmNzIl3Ds",
"parentPublication": {
"id": "proceedings/cbms/2008/3165/0",
"title": "2008 21st IEEE International Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011121775",
"title": "Multi-Touch Table System for Medical Visualization: Application to Orthopedic Surgery Planning",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011121775/13rRUwInv4l",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2006/05/mcg2006050036",
"title": "Informing the Design of Direct-Touch Tabletops",
"doi": null,
"abstractUrl": "/magazine/cg/2006/05/mcg2006050036/13rRUwvT9iE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1hyoTyc",
"doi": "10.1109/VR.2018.8446052",
"title": "Casting Virtual Shadows Based on Brightness Induction for Optical See-Through Displays",
"normalizedTitle": "Casting Virtual Shadows Based on Brightness Induction for Optical See-Through Displays",
"abstract": "This paper proposes a novel method for casting virtual shadows on real surfaces on an optical see-through head-mounted display without any extra physical filter devices. Instead, the method presents shadows as results of brightness induction. To produce brightness induction, we place a texture of the real scene with a certain transparency around the shadow area to amplify the luminance of the surrounding area. To make this amplification unnoticeable, the transparency of the surrounding region is gradually increased as the distance from the shadow region. In the experiment with 23 participants, we confirmed that users tend to perceive the shadow region is darker than a non-shadow area under the conditions where a circular virtual shadow is placed on a flat surface.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a novel method for casting virtual shadows on real surfaces on an optical see-through head-mounted display without any extra physical filter devices. Instead, the method presents shadows as results of brightness induction. To produce brightness induction, we place a texture of the real scene with a certain transparency around the shadow area to amplify the luminance of the surrounding area. To make this amplification unnoticeable, the transparency of the surrounding region is gradually increased as the distance from the shadow region. In the experiment with 23 participants, we confirmed that users tend to perceive the shadow region is darker than a non-shadow area under the conditions where a circular virtual shadow is placed on a flat surface.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a novel method for casting virtual shadows on real surfaces on an optical see-through head-mounted display without any extra physical filter devices. Instead, the method presents shadows as results of brightness induction. To produce brightness induction, we place a texture of the real scene with a certain transparency around the shadow area to amplify the luminance of the surrounding area. To make this amplification unnoticeable, the transparency of the surrounding region is gradually increased as the distance from the shadow region. In the experiment with 23 participants, we confirmed that users tend to perceive the shadow region is darker than a non-shadow area under the conditions where a circular virtual shadow is placed on a flat surface.",
"fno": "08446052",
"keywords": [
"Brightness",
"Optical Imaging",
"Electronic Mail",
"Casting",
"Cameras",
"Visualization",
"Optical Modulation",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Devices Displays And Imagers"
],
"authors": [
{
"affiliation": "Ritsumeikan University",
"fullName": "Shinnosuke Manabe",
"givenName": "Shinnosuke",
"surname": "Manabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ritsumeikan University",
"fullName": "Sei Ikeda",
"givenName": "Sei",
"surname": "Ikeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ritsumeikan University",
"fullName": "Asako Kimura",
"givenName": "Asako",
"surname": "Kimura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ritsumeikan University",
"fullName": "Fumihisa Shibata",
"givenName": "Fumihisa",
"surname": "Shibata",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "627-628",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446312",
"articleId": "13bd1eY1x42",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446522",
"articleId": "13bd1fWcuDF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2001/1272/2/127220400",
"title": "Stability Issues in Recovering Illumination Distribution from Brightness in Shadows",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2001/127220400/12OmNBziBby",
"parentPublication": {
"id": "proceedings/cvpr/2001/1272/2",
"title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459280",
"title": "Detection and removal of chromatic moving shadows in surveillance scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459280/12OmNx0RIJZ",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wiamis/2008/3130/0/3130a059",
"title": "Autonomous and Adaptive Learning of Shadows for Surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/wiamis/2008/3130a059/12OmNzayNG9",
"parentPublication": {
"id": "proceedings/wiamis/2008/3130/0",
"title": "Image Analysis for Multimedia Interactive Services, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446441/13bd1sv5NxY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06784085",
"title": "Making Graphical Information Visible in Real Shadows on Interactive Tabletops",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06784085/13rRUILLkvs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875905",
"title": "Low-Pass Filtered Volumetric Shadows",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875905/13rRUy2YLYx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2003/03/i0290",
"title": "Illumination from Shadows",
"doi": null,
"abstractUrl": "/journal/tp/2003/03/i0290/13rRUygT7z3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600c060",
"title": "BrightFlow: Brightness-Change-Aware Unsupervised Learning of Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600c060/1KxUZa02Oyc",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798049",
"title": "Shadow Inducers: Inconspicuous Highlights for Casting Virtual Shadows on OST-HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798049/1cJ0UaezhG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ17P8dQOs",
"doi": "10.1109/VR.2019.8798077",
"title": "Shadows Can Change the Shape Appearances of Real and Virtual Objects",
"normalizedTitle": "Shadows Can Change the Shape Appearances of Real and Virtual Objects",
"abstract": "The human visual system can estimate the shape of an object casting a shadow. The principle has been widely studied, and utilized in 3D scanners. However, estimating the shape of an object on which a shadow is casted (“screen object”) was rarely investigated. In this study, we show that the casted shadow distorts the perceived shape of a screen object in a class of virtual and physical scenes. In addition, we utilized the principle to create variations of Cafe-wall illusion. The principles can be utilized to control the perceived shapes without changing the structure of the screen object.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The human visual system can estimate the shape of an object casting a shadow. The principle has been widely studied, and utilized in 3D scanners. However, estimating the shape of an object on which a shadow is casted (“screen object”) was rarely investigated. In this study, we show that the casted shadow distorts the perceived shape of a screen object in a class of virtual and physical scenes. In addition, we utilized the principle to create variations of Cafe-wall illusion. The principles can be utilized to control the perceived shapes without changing the structure of the screen object.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The human visual system can estimate the shape of an object casting a shadow. The principle has been widely studied, and utilized in 3D scanners. However, estimating the shape of an object on which a shadow is casted (“screen object”) was rarely investigated. In this study, we show that the casted shadow distorts the perceived shape of a screen object in a class of virtual and physical scenes. In addition, we utilized the principle to create variations of Cafe-wall illusion. The principles can be utilized to control the perceived shapes without changing the structure of the screen object.",
"fno": "08798077",
"keywords": [
"Computer Vision",
"Data Visualisation",
"Image Motion Analysis",
"Image Representation",
"Image Sequences",
"Virtual Reality",
"Visual Perception",
"3 D Scanners",
"Cafe Wall Illusion",
"Casted Shadow",
"Human Visual System",
"Virtual Objects",
"Shape Appearances",
"Physical Scenes",
"Virtual Scenes",
"Screen Object",
"Perceived Shape",
"Shape",
"Three Dimensional Displays",
"Optical Distortion",
"Distortion",
"Casting",
"Two Dimensional Displays",
"Optical Imaging",
"Projection",
"Illumination And Shadow",
"Optical Illusion",
"Shape Representation",
"User Centered Design",
"Appearance And Texture Representation",
"Psychology"
],
"authors": [
{
"affiliation": "NTT Communication Science Laboratories",
"fullName": "Kazushi Maruya",
"givenName": "Kazushi",
"surname": "Maruya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo University of the Arts",
"fullName": "Tomoko Ohtani",
"givenName": "Tomoko",
"surname": "Ohtani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1076-1077",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798122",
"articleId": "1cJ0MR4xjWg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797910",
"articleId": "1cJ0HuNVaU0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a043",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a359",
"title": "A Method of Touching and Moving Virtual Shadows with Real Shadows",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a359/12OmNwpGgGH",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2014/5188/0/06831802",
"title": "Structure from shadow motion",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2014/06831802/12OmNzC5T3l",
"parentPublication": {
"id": "proceedings/iccp/2014/5188/0",
"title": "2014 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446052",
"title": "Casting Virtual Shadows Based on Brightness Induction for Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446052/13bd1hyoTyc",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06784085",
"title": "Making Graphical Information Visible in Real Shadows on Interactive Tabletops",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06784085/13rRUILLkvs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g267",
"title": "Inferring Light Fields from Shadows",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g267/17D45XvMcaB",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a792",
"title": "Depth Perception in Augmented Reality: The Effects of Display, Shadow, and Position",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a792/1CJcnfNSFWg",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/spw/2022/9643/0/964300a229",
"title": "Using 3D Shadows to Detect Object Hiding Attacks on Autonomous Vehicle Perception",
"doi": null,
"abstractUrl": "/proceedings-article/spw/2022/964300a229/1FiwUMnWnkI",
"parentPublication": {
"id": "proceedings/spw/2022/9643/0",
"title": "2022 IEEE Security and Privacy Workshops (SPW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049677",
"title": "ShadowMover: Automatically Projecting Real Shadows onto Virtual Object",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049677/1KYooKy1LAQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798049",
"title": "Shadow Inducers: Inconspicuous Highlights for Casting Virtual Shadows on OST-HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798049/1cJ0UaezhG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysuUuZCwM",
"doi": "10.1109/ISMAR50242.2020.00031",
"title": "Color Moiré Reduction and Resolution Improvement for Integral 3D Displays Using Multiple Wobbling Optics",
"normalizedTitle": "Color Moiré Reduction and Resolution Improvement for Integral 3D Displays Using Multiple Wobbling Optics",
"abstract": "The integral three-dimensional (3D) display is an ideal visual 3D user interface. It is a display method that fulfills many of the physiological factors of human vision. However, in integral 3D displays for mobile applications that use direct-view flat panels to display elemental images, color moiré is a problem that occurs because of the sampling of subpixels by elemental lenses and the insufficient resolution and depth reproduction of the reconstructed 3D image. In the conventional moiré reduction method, the degree of defocus of elemental lenses has to be set to a large value, which is one of the factors that reduces the performance in terms of depth reproduction. In contrast, only one-step optics can be installed and the installation positions are limited in the conventional wobbling method. This is because, in the method, which uses a birefringent optical element, two-step optics are thicker than the focal length of the lens array. For this reason, it was difficult to achieve ideal moiré reduction and depth reproduction performance improvements. To solve these problems, we propose a method that utilizes multiple optical wobbling spatiotemporal multiplexing using polarization diffractive elements and liquid-crystal polarization controllers. Using the proposed method, the wobbling optics can be designed to be thin, allowing two-step optics to be installed between the display panel and lens array. When the moiré modulation degree without wobbling is normalized as 100%, it decreases to 25% with wobbling. The proposed method not only achieves effective color moiré reduction without deteriorating the 3D image quality, but also can double the resolution of the elemental images to improve the depth reproduction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The integral three-dimensional (3D) display is an ideal visual 3D user interface. It is a display method that fulfills many of the physiological factors of human vision. However, in integral 3D displays for mobile applications that use direct-view flat panels to display elemental images, color moiré is a problem that occurs because of the sampling of subpixels by elemental lenses and the insufficient resolution and depth reproduction of the reconstructed 3D image. In the conventional moiré reduction method, the degree of defocus of elemental lenses has to be set to a large value, which is one of the factors that reduces the performance in terms of depth reproduction. In contrast, only one-step optics can be installed and the installation positions are limited in the conventional wobbling method. This is because, in the method, which uses a birefringent optical element, two-step optics are thicker than the focal length of the lens array. For this reason, it was difficult to achieve ideal moiré reduction and depth reproduction performance improvements. To solve these problems, we propose a method that utilizes multiple optical wobbling spatiotemporal multiplexing using polarization diffractive elements and liquid-crystal polarization controllers. Using the proposed method, the wobbling optics can be designed to be thin, allowing two-step optics to be installed between the display panel and lens array. When the moiré modulation degree without wobbling is normalized as 100%, it decreases to 25% with wobbling. The proposed method not only achieves effective color moiré reduction without deteriorating the 3D image quality, but also can double the resolution of the elemental images to improve the depth reproduction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The integral three-dimensional (3D) display is an ideal visual 3D user interface. It is a display method that fulfills many of the physiological factors of human vision. However, in integral 3D displays for mobile applications that use direct-view flat panels to display elemental images, color moiré is a problem that occurs because of the sampling of subpixels by elemental lenses and the insufficient resolution and depth reproduction of the reconstructed 3D image. In the conventional moiré reduction method, the degree of defocus of elemental lenses has to be set to a large value, which is one of the factors that reduces the performance in terms of depth reproduction. In contrast, only one-step optics can be installed and the installation positions are limited in the conventional wobbling method. This is because, in the method, which uses a birefringent optical element, two-step optics are thicker than the focal length of the lens array. For this reason, it was difficult to achieve ideal moiré reduction and depth reproduction performance improvements. To solve these problems, we propose a method that utilizes multiple optical wobbling spatiotemporal multiplexing using polarization diffractive elements and liquid-crystal polarization controllers. Using the proposed method, the wobbling optics can be designed to be thin, allowing two-step optics to be installed between the display panel and lens array. When the moiré modulation degree without wobbling is normalized as 100%, it decreases to 25% with wobbling. The proposed method not only achieves effective color moiré reduction without deteriorating the 3D image quality, but also can double the resolution of the elemental images to improve the depth reproduction.",
"fno": "850800a109",
"keywords": [
"Birefringence",
"Flat Panel Displays",
"Image Resolution",
"Image Sensors",
"Lenses",
"Light Polarisation",
"Liquid Crystal Devices",
"Optical Arrays",
"Optical Control",
"Optical Design Techniques",
"Optical Modulation",
"Spatiotemporal Phenomena",
"Three Dimensional Displays",
"Resolution Improvement",
"Integral 3 D Displays",
"Multiple Wobbling Optics",
"Visual 3 D User Interface",
"Direct View Flat Panels",
"Elemental Images",
"Elemental Lens",
"Reconstructed 3 D Image",
"Conventional Moiré Reduction Method",
"One Step Optics",
"Birefringent Optical Element",
"Two Step Optics",
"Depth Reproduction Performance Improvements",
"Moiré Modulation Degree",
"3 D Image Quality",
"Color Moiré Reduction",
"Multiple Optical Wobbling Spatiotemporal Multiplexing",
"Polarization Diffractive Elements",
"Integral Three Dimensional Displays",
"Liquid Crystal Polarization Controllers",
"Three Dimensional Displays",
"Optical Polarization",
"Optical Diffraction",
"Image Color Analysis",
"Optical Imaging",
"Lenses",
"Optical Arrays",
"Integral Photography",
"3 D Display",
"Color Moiré",
"Depth Reproduction",
"Wobbling Optics",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Devices",
"Displays And Imagers",
"Hardware",
"Communication Hardware",
"Interfaces And Storage",
"Displays And Imagers"
],
"authors": [
{
"affiliation": "NHK (Japan Broadcasting Corporation),Science & Technology Research Laboratories",
"fullName": "Hisayuki Sasaki",
"givenName": "Hisayuki",
"surname": "Sasaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NHK (Japan Broadcasting Corporation),Science & Technology Research Laboratories",
"fullName": "Naoto Okaichi",
"givenName": "Naoto",
"surname": "Okaichi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NHK (Japan Broadcasting Corporation),Science & Technology Research Laboratories",
"fullName": "Hayato Watanabe",
"givenName": "Hayato",
"surname": "Watanabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NHK (Japan Broadcasting Corporation),Science & Technology Research Laboratories",
"fullName": "Takuya Omura",
"givenName": "Takuya",
"surname": "Omura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NHK (Japan Broadcasting Corporation),Science & Technology Research Laboratories",
"fullName": "Masanori Kano",
"givenName": "Masanori",
"surname": "Kano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NHK (Japan Broadcasting Corporation),Science & Technology Research Laboratories",
"fullName": "Masahiro Kawakita",
"givenName": "Masahiro",
"surname": "Kawakita",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "109-116",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a101",
"articleId": "1pystZgPICk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a117",
"articleId": "1pyswxBB73y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2018/2526/0/08368469",
"title": "Focal sweep imaging with multi-focal diffractive optics",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368469/12OmNBV9Ii2",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119449",
"title": "Cellulose Nanocrystals and Nanofibers for Smart Optics Materials",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119449/12OmNvSbBtj",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9748",
"title": "Quantization-aware Deep Optics for Diffractive Snapshot Hyperspectral Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9748/1H0NBTZAs48",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a444",
"title": "A Method of Virtual Laminated Moiré Generation and Graphic Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a444/1ap5x6aSSWI",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798364",
"title": "Color Moiré Reduction Method for Thin Integral 3D Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798364/1cJ0XcgYa1W",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08827571",
"title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c424",
"title": "Mop Moiré Patterns Using MopNet",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c424/1hQqrQ4lW5a",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998293",
"title": "ThinVR: Heterogeneous microlens arrays for compact, 180 degree FOV VR near-eye displays",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998293/1hrXiCmKkak",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150946",
"title": "Moiré Pattern Removal via Attentive Fractal Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150946/1lPHaXRDf4k",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150588",
"title": "C3Net: Demoiréing Network Attentive in Channel, Color and Concatenation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150588/1lPHlqxjrri",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1rsiStQcrjW",
"title": "2020 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)",
"acronym": "icmeae",
"groupId": "1803398",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rsiTBnVj7W",
"doi": "10.1109/ICMEAE51770.2020.00029",
"title": "Experimental Results from Non-Sub-Wavelength Gold Grating-Coupled Surface Plasmon Resonance using a Monochromatic Signal for Gold/Air Interface as the base for Biosensors",
"normalizedTitle": "Experimental Results from Non-Sub-Wavelength Gold Grating-Coupled Surface Plasmon Resonance using a Monochromatic Signal for Gold/Air Interface as the base for Biosensors",
"abstract": "In this paper, a gold diffraction grating is used to generate surface Plasmon resonance using a p-polarized monochromatic incident signal (λ = 650 nm) and a dimensional ratio between wavelength and spatial period of metallic grating less than the unit (λ/d < 1) for gold/air interface. The surface Plasmon resonance was detected using the angular detection technique. The experimental results show that the resonance angle is ≈15.75° and it is close to the theoretical results calculated (≈15.17°) for the gold diffraction grating. The preliminary results are important for the research and technical development of low-cost biosensors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a gold diffraction grating is used to generate surface Plasmon resonance using a p-polarized monochromatic incident signal (λ = 650 nm) and a dimensional ratio between wavelength and spatial period of metallic grating less than the unit (λ/d < 1) for gold/air interface. The surface Plasmon resonance was detected using the angular detection technique. The experimental results show that the resonance angle is ≈15.75° and it is close to the theoretical results calculated (≈15.17°) for the gold diffraction grating. The preliminary results are important for the research and technical development of low-cost biosensors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a gold diffraction grating is used to generate surface Plasmon resonance using a p-polarized monochromatic incident signal (λ = 650 nm) and a dimensional ratio between wavelength and spatial period of metallic grating less than the unit (λ/d < 1) for gold/air interface. The surface Plasmon resonance was detected using the angular detection technique. The experimental results show that the resonance angle is ≈15.75° and it is close to the theoretical results calculated (≈15.17°) for the gold diffraction grating. The preliminary results are important for the research and technical development of low-cost biosensors.",
"fno": "990400a127",
"keywords": [
"Biosensors",
"Diffraction Gratings",
"Gold",
"Surface Plasmon Resonance",
"Gold Diffraction Grating",
"Monochromatic Signal",
"Metallic Grating",
"Resonance Angle",
"Angular Detection Technique",
"Polarized Monochromatic Incident Signal",
"Nonsub Wavelength Gold Grating Coupled Surface Plasmon Resonance",
"Gold Air Interface",
"Biosensors",
"Wavelength 650 0 Nm",
"Au",
"Integrated Optics",
"Gold",
"Optical Polarization",
"Biomedical Optical Imaging",
"Diffraction Gratings",
"Optical Sensors",
"Surface Plasmons"
],
"authors": [
{
"affiliation": "CETyS Universidad,Ensenada,Mexico",
"fullName": "Miguel Angel Ponce-Camacho",
"givenName": "Miguel Angel",
"surname": "Ponce-Camacho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CETyS Universidad,Ensenada,Mexico",
"fullName": "Josue Aaron Lopez-Leyva",
"givenName": "Josue Aaron",
"surname": "Lopez-Leyva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CETyS Universidad,Ensenada,Mexico",
"fullName": "Leiva Casemiro-Oliveira",
"givenName": "Leiva",
"surname": "Casemiro-Oliveira",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmeae",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "127-130",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9904-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "990400a122",
"articleId": "1rsiUkVGT5K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "990400a131",
"articleId": "1rsiTxLoBeU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/is3c/2016/3071/0/3071a436",
"title": "Comparison of Transmission-Type and Reflection-Type Surface Plasmon Resonance Based Fiber-Optic Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2016/3071a436/12OmNApu5pS",
"parentPublication": {
"id": "proceedings/is3c/2016/3071/0",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567496",
"title": "Phase gratings for plasmon focusing",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567496/12OmNqFa5op",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nems/2009/4629/0/05068525",
"title": "Theoretical analysis and fabrication of PDMS-based surface plasmon resonance sensor chips",
"doi": null,
"abstractUrl": "/proceedings-article/nems/2009/05068525/12OmNxG1yIs",
"parentPublication": {
"id": "proceedings/nems/2009/4629/0",
"title": "International Conference on Nano/Micro Engineered and Molecular Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicis/2011/1561/0/06063299",
"title": "Distributed Fiber Grating Spectrum Optimization Based on BP Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/icicis/2011/06063299/12OmNyS6RL2",
"parentPublication": {
"id": "proceedings/icicis/2011/1561/0",
"title": "2011 International Conference on Internet Computing and Information Services (ICICIS 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biotechno/2008/3191/0/04561144",
"title": "Biosensing Based on Surface Plasmon Resonance of Gold Nanohole and Nanoring Arrays Fabricated by a Novel Nanosphere Lithography Technique",
"doi": null,
"abstractUrl": "/proceedings-article/biotechno/2008/04561144/12OmNzWOBaL",
"parentPublication": {
"id": "proceedings/biotechno/2008/3191/0",
"title": "International Conference on Biocomputation, Bioinformatics, and Biomedical Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07760159",
"title": "An enhanced efficient thin film silicon solar cell design based on silver nanoparticle",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760159/12OmNzb7Zrl",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceitsa/2021/1300/0/130000a048",
"title": "The Design of Optical Waveguide Sensor Based on Surface Plasmon Resonance",
"doi": null,
"abstractUrl": "/proceedings-article/iceitsa/2021/130000a048/1B2HC3SFhx6",
"parentPublication": {
"id": "proceedings/iceitsa/2021/1300/0",
"title": "2021 International Conference on Electronic Information Technology and Smart Agriculture (ICEITSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sti/2021/0007/0/09732610",
"title": "Design and Analysis of Circular Lattice PCF Biosensor -Based on Surface Plasmon Resonance",
"doi": null,
"abstractUrl": "/proceedings-article/sti/2021/09732610/1BN65PE7U40",
"parentPublication": {
"id": "proceedings/sti/2021/0007/0",
"title": "2021 3rd International Conference on Sustainable Technologies for Industry 4.0 (STI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2022/9978/0/997800a006",
"title": "A Transmission Grating-based Polarization Demodulated Grating Interferometric Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2022/997800a006/1ByeBzTTXmE",
"parentPublication": {
"id": "proceedings/icmtma/2022/9978/0",
"title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2021/3596/0/359600a110",
"title": "A novel optical refractive index sensor based on VCSELs and gold nanoparticle arrays",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2021/359600a110/1tMPOU0iVQk",
"parentPublication": {
"id": "proceedings/icedme/2021/3596/0",
"title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyFCvPm",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"acronym": "avss",
"groupId": "1001307",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAiFI9s",
"doi": "10.1109/AVSS.2013.6636605",
"title": "Keynote lecture 2: “Video synopsis”",
"normalizedTitle": "Keynote lecture 2: “Video synopsis”",
"abstract": "Surveillance video is practically never used. There are claims that 0.5% of the video is watched, but the true number is probably much smaller. The reason is clear: There are too many hours of surveillance video for people to watch. Most attempts to deal with the overflow of surveillance video involve the development of automatic video understanding: object recognition and activity understanding. Video Synopsis is complementary to video understanding. After objects are detected by background subtraction, video synopsis changes the time of display of each object such that more objects are \"packed\" into a shorter time. The resulting video is a shorter summary of the original video, where the objects are shown more densely than in the original video. While video synopsis can reduce, on the average, an hour of video into a minute, the synopsis loses causality: Objects that appear together in the original video may appear at different time in the synopsis, and vice versa. The combination of video synopsis and video understanding is expected to give the maximum benefit. As video understanding is still not fool proof, people need to examine its results. Since the video showing all objects of interest will be too long, video synopsis is an excellent tool to display efficiently the results of video understanding, for video examination and even for training classifiers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Surveillance video is practically never used. There are claims that 0.5% of the video is watched, but the true number is probably much smaller. The reason is clear: There are too many hours of surveillance video for people to watch. Most attempts to deal with the overflow of surveillance video involve the development of automatic video understanding: object recognition and activity understanding. Video Synopsis is complementary to video understanding. After objects are detected by background subtraction, video synopsis changes the time of display of each object such that more objects are \"packed\" into a shorter time. The resulting video is a shorter summary of the original video, where the objects are shown more densely than in the original video. While video synopsis can reduce, on the average, an hour of video into a minute, the synopsis loses causality: Objects that appear together in the original video may appear at different time in the synopsis, and vice versa. The combination of video synopsis and video understanding is expected to give the maximum benefit. As video understanding is still not fool proof, people need to examine its results. Since the video showing all objects of interest will be too long, video synopsis is an excellent tool to display efficiently the results of video understanding, for video examination and even for training classifiers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Surveillance video is practically never used. There are claims that 0.5% of the video is watched, but the true number is probably much smaller. The reason is clear: There are too many hours of surveillance video for people to watch. Most attempts to deal with the overflow of surveillance video involve the development of automatic video understanding: object recognition and activity understanding. Video Synopsis is complementary to video understanding. After objects are detected by background subtraction, video synopsis changes the time of display of each object such that more objects are \"packed\" into a shorter time. The resulting video is a shorter summary of the original video, where the objects are shown more densely than in the original video. While video synopsis can reduce, on the average, an hour of video into a minute, the synopsis loses causality: Objects that appear together in the original video may appear at different time in the synopsis, and vice versa. The combination of video synopsis and video understanding is expected to give the maximum benefit. As video understanding is still not fool proof, people need to examine its results. Since the video showing all objects of interest will be too long, video synopsis is an excellent tool to display efficiently the results of video understanding, for video examination and even for training classifiers.",
"fno": "06636605",
"keywords": [],
"authors": [
{
"affiliation": "Hebrew University of Jerusalem, Israel",
"fullName": "Shmuel Peleg",
"givenName": "Shmuel",
"surname": "Peleg",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "avss",
"isOpenAccess": true,
"showRecommendedArticles": false,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2013-08-01T00:00:00",
"pubType": "proceedings",
"pages": "XVII-XVII",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-0703-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06636604",
"articleId": "12OmNA0MZ5D",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06636606",
"articleId": "12OmNBBQZtv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAtK4h3",
"doi": "",
"title": "Key observation selection for effective video synopsis",
"normalizedTitle": "Key observation selection for effective video synopsis",
"abstract": "Millions of video surveillance cameras distribute around the world, and capture tremendous number of video data endlessly. Video browsing by frame is time consuming and inefficient, since needless information is abundant in the raw videos. Video synopsis is an effective way to solve this problem by producing a short video abstraction, while keeping the essential activities of the original video. However, traditional video synopsis only eliminates redundancy in spatial and temporal domain, while neglects redundancy in content domain. However, too many observations will make synopsis video confusing and degrade synopsis efficiency. In this paper, we present a novel video synopsis method based on key observation selection. Key observation selection is conducted for activity to eliminate content redundancy. We have demonstrated the effectiveness of our approach on real surveillance videos.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Millions of video surveillance cameras distribute around the world, and capture tremendous number of video data endlessly. Video browsing by frame is time consuming and inefficient, since needless information is abundant in the raw videos. Video synopsis is an effective way to solve this problem by producing a short video abstraction, while keeping the essential activities of the original video. However, traditional video synopsis only eliminates redundancy in spatial and temporal domain, while neglects redundancy in content domain. However, too many observations will make synopsis video confusing and degrade synopsis efficiency. In this paper, we present a novel video synopsis method based on key observation selection. Key observation selection is conducted for activity to eliminate content redundancy. We have demonstrated the effectiveness of our approach on real surveillance videos.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Millions of video surveillance cameras distribute around the world, and capture tremendous number of video data endlessly. Video browsing by frame is time consuming and inefficient, since needless information is abundant in the raw videos. Video synopsis is an effective way to solve this problem by producing a short video abstraction, while keeping the essential activities of the original video. However, traditional video synopsis only eliminates redundancy in spatial and temporal domain, while neglects redundancy in content domain. However, too many observations will make synopsis video confusing and degrade synopsis efficiency. In this paper, we present a novel video synopsis method based on key observation selection. Key observation selection is conducted for activity to eliminate content redundancy. We have demonstrated the effectiveness of our approach on real surveillance videos.",
"fno": "06460682",
"keywords": [
"Redundancy",
"Spatiotemporal Phenomena",
"Video Cameras",
"Video Retrieval",
"Video Surveillance",
"Video Surveillance Cameras",
"Video Data Capturing",
"Video Browsing",
"Video Abstraction",
"Spatial Domain Redundancy Elimination",
"Temporal Domain Redundancy Elimination",
"Video Synopsis Method",
"Key Observation Selection",
"Content Redundancy Elimination",
"Electron Tubes",
"Redundancy",
"Kernel",
"Cameras",
"Surveillance",
"Abstracts",
"Nominations And Elections"
],
"authors": [
{
"affiliation": "NLPR, Institute of Automation, Chinese Academy of Sciences",
"fullName": "Xiaobin Zhu",
"givenName": "Xiaobin",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NLPR, Institute of Automation, Chinese Academy of Sciences",
"fullName": "Jing Liu",
"givenName": "Jing",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NLPR, Institute of Automation, Chinese Academy of Sciences",
"fullName": "Jinqiao Wang",
"givenName": "Jinqiao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NLPR, Institute of Automation, Chinese Academy of Sciences",
"fullName": "Hanqing Lu",
"givenName": "Hanqing",
"surname": "Lu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "2528-2531",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460681",
"articleId": "12OmNwGZNL4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460683",
"articleId": "12OmNyS6Rxi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2013/0703/0/06636605",
"title": "Keynote lecture 2: “Video synopsis”",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636605/12OmNAiFI9s",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2017/4283/0/4283a319",
"title": "Generalised Spatio Temporal Feature Based Important Activity Synopsis Generation",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2017/4283a319/12OmNBsLPd6",
"parentPublication": {
"id": "proceedings/sitis/2017/4283/0",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2013/1604/0/06618226",
"title": "Demo paper: Video retrieval synopsis for moving objects",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2013/06618226/12OmNvSbBFf",
"parentPublication": {
"id": "proceedings/icmew/2013/1604/0",
"title": "2013 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a081",
"title": "Video Synopsis by Heterogeneous Multi-source Correlation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a081/12OmNxbmSEm",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169855",
"title": "Coherent event-based surveillance video synopsis using trajectory clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169855/12OmNyYm2F4",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2007/1630/0/04408934",
"title": "Webcam Synopsis: Peeking Around the World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2007/04408934/12OmNzGlRz7",
"parentPublication": {
"id": "proceedings/iccv/2007/1630/0",
"title": "2007 11th IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a207",
"title": "Surveillance Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a207/12OmNzt0IrV",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130487",
"title": "A surveillance video analysis and storage scheme for scalable synopsis browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130487/12OmNzvz6G4",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/11/ttp2008111971",
"title": "Nonchronological Video Synopsis and Indexing",
"doi": null,
"abstractUrl": "/journal/tp/2008/11/ttp2008111971/13rRUxcKzWm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/5555/01/09964076",
"title": "An improved interaction estimation and optimization method for surveillance video synopsis",
"doi": null,
"abstractUrl": "/magazine/mu/5555/01/09964076/1IAFJKKtWtq",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNySXF3a",
"title": "2013 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvSbBFf",
"doi": "10.1109/ICMEW.2013.6618226",
"title": "Demo paper: Video retrieval synopsis for moving objects",
"normalizedTitle": "Demo paper: Video retrieval synopsis for moving objects",
"abstract": "This paper demonstrates a novel retrieval synopsis system based on moving objects for surveillance video. With the popularization of digital video surveillance, massive data has been stored and the volume is still rising. How to utilize surveillance video effectively and efficiently is strategically important for practical applications. So as to improve the availability of video, intelligent applications, which contain object extraction, video indexing, video retrieval, and fast browsing, are performed with background modeling and retrieval synopsis. Specifically, retrieval synopsis offers three retrieval modes: playback retrieval mode, variable-fidelity retrieval mode, and attribute retrieval mode. That is, both integral synopsis video browsing and specific objects retrieval browsing can be executed on original video. As demos verified, the system can realize video retrieval and synopsis browsing in a flexible and efficient way.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper demonstrates a novel retrieval synopsis system based on moving objects for surveillance video. With the popularization of digital video surveillance, massive data has been stored and the volume is still rising. How to utilize surveillance video effectively and efficiently is strategically important for practical applications. So as to improve the availability of video, intelligent applications, which contain object extraction, video indexing, video retrieval, and fast browsing, are performed with background modeling and retrieval synopsis. Specifically, retrieval synopsis offers three retrieval modes: playback retrieval mode, variable-fidelity retrieval mode, and attribute retrieval mode. That is, both integral synopsis video browsing and specific objects retrieval browsing can be executed on original video. As demos verified, the system can realize video retrieval and synopsis browsing in a flexible and efficient way.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper demonstrates a novel retrieval synopsis system based on moving objects for surveillance video. With the popularization of digital video surveillance, massive data has been stored and the volume is still rising. How to utilize surveillance video effectively and efficiently is strategically important for practical applications. So as to improve the availability of video, intelligent applications, which contain object extraction, video indexing, video retrieval, and fast browsing, are performed with background modeling and retrieval synopsis. Specifically, retrieval synopsis offers three retrieval modes: playback retrieval mode, variable-fidelity retrieval mode, and attribute retrieval mode. That is, both integral synopsis video browsing and specific objects retrieval browsing can be executed on original video. As demos verified, the system can realize video retrieval and synopsis browsing in a flexible and efficient way.",
"fno": "06618226",
"keywords": [
"Moving Objects",
"Surveillance Video",
"Retrieval Synopsis",
"Video Browsing"
],
"authors": [
{
"affiliation": "Chinese Academy of Sciences R&D Center for Internet of Things, Wuxi, China",
"fullName": "Shizheng Wang",
"givenName": "Shizheng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences R&D Center for Internet of Things, Wuxi, China",
"fullName": "Jianwei Yang",
"givenName": "Jianwei",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences R&D Center for Internet of Things, Wuxi, China",
"fullName": "Dong Yi",
"givenName": null,
"surname": "Dong Yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NERCMS, School of Computer, LIESMARS, Wuhan University, China",
"fullName": "Zhongyuan Wang",
"givenName": null,
"surname": "Zhongyuan Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-2",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-1604-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06618225",
"articleId": "12OmNyOHG0V",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06618227",
"articleId": "12OmNrEL2A3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2013/0703/0/06636605",
"title": "Keynote lecture 2: “Video synopsis”",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636605/12OmNAiFI9s",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460682",
"title": "Key observation selection for effective video synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460682/12OmNAtK4h3",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a081",
"title": "Video Synopsis by Heterogeneous Multi-source Correlation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a081/12OmNxbmSEm",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169855",
"title": "Coherent event-based surveillance video synopsis using trajectory clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169855/12OmNyYm2F4",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2007/1630/0/04408934",
"title": "Webcam Synopsis: Peeking Around the World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2007/04408934/12OmNzGlRz7",
"parentPublication": {
"id": "proceedings/iccv/2007/1630/0",
"title": "2007 11th IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2006/2597/1/259710435",
"title": "Making a Long Video Short: Dynamic Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2006/259710435/12OmNzYeB3c",
"parentPublication": {
"id": "proceedings/cvpr/2006/2597/1",
"title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a207",
"title": "Surveillance Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a207/12OmNzt0IrV",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130487",
"title": "A surveillance video analysis and storage scheme for scalable synopsis browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130487/12OmNzvz6G4",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06702519",
"title": "Object Movements Synopsis via Part Assembling and Stitching",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06702519/13rRUwkfAZi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/11/ttp2008111971",
"title": "Nonchronological Video Synopsis and Indexing",
"doi": null,
"abstractUrl": "/journal/tp/2008/11/ttp2008111971/13rRUxcKzWm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqH9hnp",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvnwVjY",
"doi": "10.1109/CVPR.2016.84",
"title": "Discovering the Physical Parts of an Articulated Object Class from Multiple Videos",
"normalizedTitle": "Discovering the Physical Parts of an Articulated Object Class from Multiple Videos",
"abstract": "We propose a motion-based method to discover the physical parts of an articulated object class (e.g. head/torso/leg of a horse) from multiple videos. The key is to find object regions that exhibit consistent motion relative to the rest of the object, across multiple videos. We can then learn a location model for the parts and segment them accurately in the individual videos using an energy function that also enforces temporal and spatial consistency in part motion. Unlike our approach, traditional methods for motion segmentation or non-rigid structure from motion operate on one video at a time. Hence they cannot discover a part unless it displays independent motion in that particular video. We evaluate our method on a new dataset of 32 videos of tigers and horses, where we significantly outperform a recent motion segmentation method on the task of part discovery (obtaining roughly twice the accuracy).",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a motion-based method to discover the physical parts of an articulated object class (e.g. head/torso/leg of a horse) from multiple videos. The key is to find object regions that exhibit consistent motion relative to the rest of the object, across multiple videos. We can then learn a location model for the parts and segment them accurately in the individual videos using an energy function that also enforces temporal and spatial consistency in part motion. Unlike our approach, traditional methods for motion segmentation or non-rigid structure from motion operate on one video at a time. Hence they cannot discover a part unless it displays independent motion in that particular video. We evaluate our method on a new dataset of 32 videos of tigers and horses, where we significantly outperform a recent motion segmentation method on the task of part discovery (obtaining roughly twice the accuracy).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a motion-based method to discover the physical parts of an articulated object class (e.g. head/torso/leg of a horse) from multiple videos. The key is to find object regions that exhibit consistent motion relative to the rest of the object, across multiple videos. We can then learn a location model for the parts and segment them accurately in the individual videos using an energy function that also enforces temporal and spatial consistency in part motion. Unlike our approach, traditional methods for motion segmentation or non-rigid structure from motion operate on one video at a time. Hence they cannot discover a part unless it displays independent motion in that particular video. We evaluate our method on a new dataset of 32 videos of tigers and horses, where we significantly outperform a recent motion segmentation method on the task of part discovery (obtaining roughly twice the accuracy).",
"fno": "8851a714",
"keywords": [
"Videos",
"Motion Segmentation",
"Legged Locomotion",
"Computer Vision",
"Horses",
"Torso"
],
"authors": [
{
"affiliation": null,
"fullName": "Luca Del Pero",
"givenName": "Luca",
"surname": "Del Pero",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Susanna Ricco",
"givenName": "Susanna",
"surname": "Ricco",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rahul Sukthankar",
"givenName": "Rahul",
"surname": "Sukthankar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Vittorio Ferrari",
"givenName": "Vittorio",
"surname": "Ferrari",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "714-723",
"year": "2016",
"issn": "1063-6919",
"isbn": "978-1-4673-8851-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8851a705",
"articleId": "12OmNvStcxS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8851a724",
"articleId": "12OmNBqv2kq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2018/4886/0/488601a494",
"title": "Long-Term Person Re-identification Using True Motion from Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a494/12OmNBZYTnr",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2018/4652/0/465201a201",
"title": "Human Object Identification for Human-Robot Interaction by Using Fast R-CNN",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2018/465201a201/12OmNBkfRmo",
"parentPublication": {
"id": "proceedings/irc/2018/4652/0",
"title": "2018 Second IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2015/6759/0/07301278",
"title": "Discovering human interactions in videos with limited data labeling",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2015/07301278/12OmNrAMEWc",
"parentPublication": {
"id": "proceedings/cvprw/2015/6759/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c537",
"title": "Temporal Segmentation of Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c537/12OmNs59JN1",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298827",
"title": "Articulated motion discovery using pairs of trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298827/12OmNwHz07x",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2016/3284/0/08010592",
"title": "Vehicle-pedestrian dynamic interaction through tractography of relative movements and articulated pedestrian pose estimation",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2016/08010592/12OmNx4Q6IA",
"parentPublication": {
"id": "proceedings/aipr/2016/3284/0",
"title": "2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d435",
"title": "Detangling People: Individuating Multiple Close People and Their Body Parts via Region Assembly",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d435/12OmNx8wTgF",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2016/7258/0/07552941",
"title": "Recognize human activities from multi-part missing videos",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552941/12OmNzcPABJ",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h593",
"title": "Future Person Localization in First-Person Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h593/17D45Vw15sY",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800j919",
"title": "SpeedNet: Learning the Speediness in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800j919/1m3op9iJEze",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyvGyiU",
"doi": null,
"title": "A tracking based fast online complete video synopsis approach",
"normalizedTitle": "A tracking based fast online complete video synopsis approach",
"abstract": "By segmenting moving objects out and then densely stitching them into background frames, video synopsis provides an efficient way to condense long videos while preserving most activities. Existing video synopsis methods, however, often suffer from either high computation cost due to global energy minimization or unsatisfactory condense rate to avoid loss of important object activities. To address these problems, a tracking based fast online video synopsis approach is proposed in this paper which makes following three main contributions: 1) an online formulation of the video synopsis problem which makes the approach very fast and scalable to endless surveillance videos with reduced chronological disorders, 2) a tracking based schema which can preserve most object activities, and 3) a complete optimization process from both temporal and spatial redundancies of the video which results in much higher condense rate and less object conflict rate. Experimental results demonstrate the effectiveness and efficiency of proposed approach compared to the traditional method on public surveillance videos.",
"abstracts": [
{
"abstractType": "Regular",
"content": "By segmenting moving objects out and then densely stitching them into background frames, video synopsis provides an efficient way to condense long videos while preserving most activities. Existing video synopsis methods, however, often suffer from either high computation cost due to global energy minimization or unsatisfactory condense rate to avoid loss of important object activities. To address these problems, a tracking based fast online video synopsis approach is proposed in this paper which makes following three main contributions: 1) an online formulation of the video synopsis problem which makes the approach very fast and scalable to endless surveillance videos with reduced chronological disorders, 2) a tracking based schema which can preserve most object activities, and 3) a complete optimization process from both temporal and spatial redundancies of the video which results in much higher condense rate and less object conflict rate. Experimental results demonstrate the effectiveness and efficiency of proposed approach compared to the traditional method on public surveillance videos.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "By segmenting moving objects out and then densely stitching them into background frames, video synopsis provides an efficient way to condense long videos while preserving most activities. Existing video synopsis methods, however, often suffer from either high computation cost due to global energy minimization or unsatisfactory condense rate to avoid loss of important object activities. To address these problems, a tracking based fast online video synopsis approach is proposed in this paper which makes following three main contributions: 1) an online formulation of the video synopsis problem which makes the approach very fast and scalable to endless surveillance videos with reduced chronological disorders, 2) a tracking based schema which can preserve most object activities, and 3) a complete optimization process from both temporal and spatial redundancies of the video which results in much higher condense rate and less object conflict rate. Experimental results demonstrate the effectiveness and efficiency of proposed approach compared to the traditional method on public surveillance videos.",
"fno": "06460540",
"keywords": [
"Image Forensics",
"Image Motion Analysis",
"Image Segmentation",
"Minimisation",
"Object Tracking",
"Spatiotemporal Phenomena",
"Video Surveillance",
"Moving Object Segmentation",
"Background Frame",
"Global Energy Minimization",
"Tracking Based Fast Online Video Synopsis Approach",
"Reduced Chronological Disorder",
"Optimization",
"Temporal Redundancy",
"Spatial Redundancy",
"Public Video Surveillance",
"Streaming Media",
"Pattern Recognition",
"Surveillance",
"Minimization",
"Real Time Systems",
"Redundancy",
"Trajectory"
],
"authors": [
{
"affiliation": "Computer Science and Technology Department, Tsinghua University, Beijing, 100084, China",
"fullName": "Lei Sun",
"givenName": "Lei",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science and Technology Department, Tsinghua University, Beijing, 100084, China",
"fullName": "Junliang Xing",
"givenName": "Junliang",
"surname": "Xing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science and Technology Department, Tsinghua University, Beijing, 100084, China",
"fullName": "Haizhou Ai",
"givenName": "Haizhou",
"surname": "Ai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Development Center, OMRON Social Solutions Co., LTD, Kyoto 619-0283, Japan",
"fullName": "Shihong Lao",
"givenName": "Shihong",
"surname": "Lao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1956-1959",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460539",
"articleId": "12OmNBigFlm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460541",
"articleId": "12OmNwp74B1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460682",
"title": "Key observation selection for effective video synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460682/12OmNAtK4h3",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2017/4283/0/4283a319",
"title": "Generalised Spatio Temporal Feature Based Important Activity Synopsis Generation",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2017/4283a319/12OmNBsLPd6",
"parentPublication": {
"id": "proceedings/sitis/2017/4283/0",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2009/3718/0/3718a195",
"title": "Clustered Synopsis of Surveillance Video",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2009/3718a195/12OmNCzKlMi",
"parentPublication": {
"id": "proceedings/avss/2009/3718/0",
"title": "2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/263P2C01",
"title": "Online content-aware video condensation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/263P2C01/12OmNwwuDSM",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169855",
"title": "Coherent event-based surveillance video synopsis using trajectory clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169855/12OmNyYm2F4",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a207",
"title": "Surveillance Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a207/12OmNzt0IrV",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130487",
"title": "A surveillance video analysis and storage scheme for scalable synopsis browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130487/12OmNzvz6G4",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101664",
"title": "Compact Video Synopsis via Global Spatiotemporal Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101664/13rRUx0xPII",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/5555/01/09964076",
"title": "An improved interaction estimation and optimization method for surveillance video synopsis",
"doi": null,
"abstractUrl": "/magazine/mu/5555/01/09964076/1IAFJKKtWtq",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2020/4272/0/427200a113",
"title": "Video Summarization via Cluster-Based Object Tracking and Type-Based Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2020/427200a113/1mA9Y74LcXu",
"parentPublication": {
"id": "proceedings/mipr/2020/4272/0",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAkWva5",
"title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "1",
"displayVolume": "1",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzYeB3c",
"doi": "10.1109/CVPR.2006.179",
"title": "Making a Long Video Short: Dynamic Video Synopsis",
"normalizedTitle": "Making a Long Video Short: Dynamic Video Synopsis",
"abstract": "The power of video over still images is the ability to represent dynamic activities. But video browsing and retrieval are inconvenient due to inherent spatio-temporal redundancies, where some time intervals may have no activity, or have activities that occur in a small image region. Video synopsis aims to provide a compact video representation, while preserving the essential activities of the original video. We present dynamic video synopsis, where most of the activity in the video is condensed by simultaneously showing several actions, even when they originally occurred at different times. For example, we can create a \"stroboscopic movie\", where multiple dynamic instances of a moving object are played simultaneously. This is an extension of the still stroboscopic picture. Previous approaches for video abstraction addressed mostly the temporal redundancy by selecting representative key-frames or time intervals. In dynamic video synopsis the activity is shifted into a significantly shorter period, in which the activity is much denser. Video examples can be found online in http://www.vision.huji.ac.il/synopsis",
"abstracts": [
{
"abstractType": "Regular",
"content": "The power of video over still images is the ability to represent dynamic activities. But video browsing and retrieval are inconvenient due to inherent spatio-temporal redundancies, where some time intervals may have no activity, or have activities that occur in a small image region. Video synopsis aims to provide a compact video representation, while preserving the essential activities of the original video. We present dynamic video synopsis, where most of the activity in the video is condensed by simultaneously showing several actions, even when they originally occurred at different times. For example, we can create a \"stroboscopic movie\", where multiple dynamic instances of a moving object are played simultaneously. This is an extension of the still stroboscopic picture. Previous approaches for video abstraction addressed mostly the temporal redundancy by selecting representative key-frames or time intervals. In dynamic video synopsis the activity is shifted into a significantly shorter period, in which the activity is much denser. Video examples can be found online in http://www.vision.huji.ac.il/synopsis",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The power of video over still images is the ability to represent dynamic activities. But video browsing and retrieval are inconvenient due to inherent spatio-temporal redundancies, where some time intervals may have no activity, or have activities that occur in a small image region. Video synopsis aims to provide a compact video representation, while preserving the essential activities of the original video. We present dynamic video synopsis, where most of the activity in the video is condensed by simultaneously showing several actions, even when they originally occurred at different times. For example, we can create a \"stroboscopic movie\", where multiple dynamic instances of a moving object are played simultaneously. This is an extension of the still stroboscopic picture. Previous approaches for video abstraction addressed mostly the temporal redundancy by selecting representative key-frames or time intervals. In dynamic video synopsis the activity is shifted into a significantly shorter period, in which the activity is much denser. Video examples can be found online in http://www.vision.huji.ac.il/synopsis",
"fno": "259710435",
"keywords": [],
"authors": [
{
"affiliation": "Hebrew University of Jerusalem",
"fullName": "Alex Rav-Acha",
"givenName": "Alex",
"surname": "Rav-Acha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hebrew University of Jerusalem",
"fullName": "Yael Pritch",
"givenName": "Yael",
"surname": "Pritch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hebrew University of Jerusalem",
"fullName": "Shmuel Peleg",
"givenName": "Shmuel",
"surname": "Peleg",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-06-01T00:00:00",
"pubType": "proceedings",
"pages": "435-441",
"year": "2006",
"issn": "1063-6919",
"isbn": "0-7695-2597-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "259710427",
"articleId": "12OmNwpoFKW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "259710442",
"articleId": "12OmNAWH9xa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2013/0703/0/06636605",
"title": "Keynote lecture 2: “Video synopsis”",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636605/12OmNAiFI9s",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460682",
"title": "Key observation selection for effective video synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460682/12OmNAtK4h3",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2009/3718/0/3718a195",
"title": "Clustered Synopsis of Surveillance Video",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2009/3718a195/12OmNCzKlMi",
"parentPublication": {
"id": "proceedings/avss/2009/3718/0",
"title": "2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a017",
"title": "Online Principal Background Selection for Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a017/12OmNvIxeVi",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2013/1604/0/06618226",
"title": "Demo paper: Video retrieval synopsis for moving objects",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2013/06618226/12OmNvSbBFf",
"parentPublication": {
"id": "proceedings/icmew/2013/1604/0",
"title": "2013 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2007/1630/0/04408934",
"title": "Webcam Synopsis: Peeking Around the World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2007/04408934/12OmNzGlRz7",
"parentPublication": {
"id": "proceedings/iccv/2007/1630/0",
"title": "2007 11th IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a207",
"title": "Surveillance Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a207/12OmNzt0IrV",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130487",
"title": "A surveillance video analysis and storage scheme for scalable synopsis browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130487/12OmNzvz6G4",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101664",
"title": "Compact Video Synopsis via Global Spatiotemporal Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101664/13rRUx0xPII",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/11/ttp2008111971",
"title": "Nonchronological Video Synopsis and Indexing",
"doi": null,
"abstractUrl": "/journal/tp/2008/11/ttp2008111971/13rRUxcKzWm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAY79oC",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"acronym": "icvgip",
"groupId": "1800020",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzt0IrV",
"doi": "10.1109/ICVGIP.2008.84",
"title": "Surveillance Video Synopsis",
"normalizedTitle": "Surveillance Video Synopsis",
"abstract": "Video is a powerful tool to show various activities but generally we use still images to show a condensed video, which is problematic in viewing and comprehending. Thus, there is a need for a summarized surveillance video. A fundamental goal of any video summarization or synopsis technique with reference to a surveillance video is to reduce the Spatio-temporal redundancy. The activity in any surveillance video is very less as compared to the total length of the video. The spatial redundancy is removed by showing two activities that happened in different frames at different spatial locations in a single frame. Temporal redundancy is removed by detecting the frames having low activity and then deleting those frames. We then generate a stroboscopic video, which traces path of the extracted object. Lastly, we introduce a Media Player, which indexes the video synopsis to the original video demonstrating how the video synopsis can be used as an effective tool.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Video is a powerful tool to show various activities but generally we use still images to show a condensed video, which is problematic in viewing and comprehending. Thus, there is a need for a summarized surveillance video. A fundamental goal of any video summarization or synopsis technique with reference to a surveillance video is to reduce the Spatio-temporal redundancy. The activity in any surveillance video is very less as compared to the total length of the video. The spatial redundancy is removed by showing two activities that happened in different frames at different spatial locations in a single frame. Temporal redundancy is removed by detecting the frames having low activity and then deleting those frames. We then generate a stroboscopic video, which traces path of the extracted object. Lastly, we introduce a Media Player, which indexes the video synopsis to the original video demonstrating how the video synopsis can be used as an effective tool.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Video is a powerful tool to show various activities but generally we use still images to show a condensed video, which is problematic in viewing and comprehending. Thus, there is a need for a summarized surveillance video. A fundamental goal of any video summarization or synopsis technique with reference to a surveillance video is to reduce the Spatio-temporal redundancy. The activity in any surveillance video is very less as compared to the total length of the video. The spatial redundancy is removed by showing two activities that happened in different frames at different spatial locations in a single frame. Temporal redundancy is removed by detecting the frames having low activity and then deleting those frames. We then generate a stroboscopic video, which traces path of the extracted object. Lastly, we introduce a Media Player, which indexes the video synopsis to the original video demonstrating how the video synopsis can be used as an effective tool.",
"fno": "3476a207",
"keywords": [
"Surveillance",
"Video",
"Synopsis"
],
"authors": [
{
"affiliation": null,
"fullName": "Vikas Choudhary",
"givenName": "Vikas",
"surname": "Choudhary",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anil K. Tiwari",
"givenName": "Anil K.",
"surname": "Tiwari",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvgip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "207-212",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3476-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3476a193",
"articleId": "12OmNzX6cjI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3476a213",
"articleId": "12OmNzvQHQD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2013/0703/0/06636605",
"title": "Keynote lecture 2: “Video synopsis”",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636605/12OmNAiFI9s",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460682",
"title": "Key observation selection for effective video synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460682/12OmNAtK4h3",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2009/3718/0/3718a195",
"title": "Clustered Synopsis of Surveillance Video",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2009/3718a195/12OmNCzKlMi",
"parentPublication": {
"id": "proceedings/avss/2009/3718/0",
"title": "2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169855",
"title": "Coherent event-based surveillance video synopsis using trajectory clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169855/12OmNyYm2F4",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460540",
"title": "A tracking based fast online complete video synopsis approach",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460540/12OmNyvGyiU",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2006/2597/1/259710435",
"title": "Making a Long Video Short: Dynamic Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2006/259710435/12OmNzYeB3c",
"parentPublication": {
"id": "proceedings/cvpr/2006/2597/2",
"title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130487",
"title": "A surveillance video analysis and storage scheme for scalable synopsis browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130487/12OmNzvz6G4",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101664",
"title": "Compact Video Synopsis via Global Spatiotemporal Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101664/13rRUx0xPII",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/11/ttp2008111971",
"title": "Nonchronological Video Synopsis and Indexing",
"doi": null,
"abstractUrl": "/journal/tp/2008/11/ttp2008111971/13rRUxcKzWm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/5555/01/09964076",
"title": "An improved interaction estimation and optimization method for surveillance video synopsis",
"doi": null,
"abstractUrl": "/magazine/mu/5555/01/09964076/1IAFJKKtWtq",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1mA9XpQUfWo",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"acronym": "mipr",
"groupId": "1825825",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1mA9Y74LcXu",
"doi": "10.1109/MIPR49039.2020.00030",
"title": "Video Summarization via Cluster-Based Object Tracking and Type-Based Synopsis",
"normalizedTitle": "Video Summarization via Cluster-Based Object Tracking and Type-Based Synopsis",
"abstract": "In this paper, we construct a trajectory-based system for the synopsis of surveillance video stream. The proposed approach first applies a cluster-based tracking method to extract foreground object from input videos, then extracts the abnormal object and classifies them into different motion patterns. Finally a type-based synopsis scheme is proposed to properly gather the moving object of different pattern types into limited time endurance. As a consequence, this system would be helpful for accurate and fast surveillance videos analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we construct a trajectory-based system for the synopsis of surveillance video stream. The proposed approach first applies a cluster-based tracking method to extract foreground object from input videos, then extracts the abnormal object and classifies them into different motion patterns. Finally a type-based synopsis scheme is proposed to properly gather the moving object of different pattern types into limited time endurance. As a consequence, this system would be helpful for accurate and fast surveillance videos analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we construct a trajectory-based system for the synopsis of surveillance video stream. The proposed approach first applies a cluster-based tracking method to extract foreground object from input videos, then extracts the abnormal object and classifies them into different motion patterns. Finally a type-based synopsis scheme is proposed to properly gather the moving object of different pattern types into limited time endurance. As a consequence, this system would be helpful for accurate and fast surveillance videos analysis.",
"fno": "427200a113",
"keywords": [
"Feature Extraction",
"Image Classification",
"Image Motion Analysis",
"Object Tracking",
"Pattern Clustering",
"Video Signal Processing",
"Video Streaming",
"Video Surveillance",
"Motion Patterns",
"Type Based Synopsis Scheme",
"Moving Object",
"Fast Surveillance Videos Analysis",
"Video Summarization",
"Trajectory Based System",
"Surveillance Video Stream",
"Foreground Object",
"Abnormal Object",
"Cluster Based Object Tracking",
"Trajectory",
"Surveillance",
"Streaming Media",
"Object Tracking",
"Conferences",
"Feature Extraction",
"Video Synopsis",
"Tracking",
"System"
],
"authors": [
{
"affiliation": "Shanghai Jiao Tong University",
"fullName": "Yuxi Li",
"givenName": "Yuxi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University",
"fullName": "Weiyao Lin",
"givenName": "Weiyao",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University",
"fullName": "Tao Wang",
"givenName": "Tao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research & Advanced Technology Division, SAIC Motor Corporation Limited",
"fullName": "Qi Guo",
"givenName": "Qi",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research & Advanced Technology Division, SAIC Motor Corporation Limited",
"fullName": "Ruijia Yang",
"givenName": "Ruijia",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Institute for Advanced Communication and Data Science, Shanghai Univerisity",
"fullName": "Shugong Xu",
"givenName": "Shugong",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-08-01T00:00:00",
"pubType": "proceedings",
"pages": "113-116",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-4272-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "427200a109",
"articleId": "1mA9ZbLVzRm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "427200a117",
"articleId": "1mAa12bGZvG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460682",
"title": "Key observation selection for effective video synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460682/12OmNAtK4h3",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/263P2C01",
"title": "Online content-aware video condensation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/263P2C01/12OmNwwuDSM",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2018/1857/0/185701a368",
"title": "Determining the Necessary Frame Rate of Video Data for Object Tracking under Accuracy Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2018/185701a368/12OmNx0A7MO",
"parentPublication": {
"id": "proceedings/mipr/2018/1857/0",
"title": "2018 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169855",
"title": "Coherent event-based surveillance video synopsis using trajectory clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169855/12OmNyYm2F4",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460540",
"title": "A tracking based fast online complete video synopsis approach",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460540/12OmNyvGyiU",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2016/4571/0/4571a391",
"title": "Detection of the Periodicity of Human Actions for Efficient Video Summarization",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a391/12OmNzTppB8",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a207",
"title": "Surveillance Video Synopsis",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a207/12OmNzt0IrV",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130487",
"title": "A surveillance video analysis and storage scheme for scalable synopsis browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130487/12OmNzvz6G4",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101664",
"title": "Compact Video Synopsis via Global Spatiotemporal Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101664/13rRUx0xPII",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/5555/01/09964076",
"title": "An improved interaction estimation and optimization method for surveillance video synopsis",
"doi": null,
"abstractUrl": "/magazine/mu/5555/01/09964076/1IAFJKKtWtq",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKys",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "3",
"displayVolume": "3",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNA0MZ8q",
"doi": "10.1109/ICPR.2002.1048004",
"title": "Multibaseline Stereo in the Presence of Specular Reflections",
"normalizedTitle": "Multibaseline Stereo in the Presence of Specular Reflections",
"abstract": "We address the problem of accurate depth estimation using multibaseline stereo in the presence of specular reflections. Specular reflections can cause the intensity and color of corresponding points to change dramatically according to different viewpoints, thus producing severe matching errors for various stereo algorithms. In this paper, we propose a new method to deal with this problem by treating specular reflections as occlusions. Our idea is to first detect specular pixels by computing the uncertainty of depth estimates. Then we combine the use of flexible windows and an adaptively selected subset of images to avoid these specular areas in all the multibaseline stereo images. Even though specularities may exist in the reference image, accurate depth is nevertheless estimated for all pixels. Experiments show that our consideration of specular reflections leads to improved stereo results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We address the problem of accurate depth estimation using multibaseline stereo in the presence of specular reflections. Specular reflections can cause the intensity and color of corresponding points to change dramatically according to different viewpoints, thus producing severe matching errors for various stereo algorithms. In this paper, we propose a new method to deal with this problem by treating specular reflections as occlusions. Our idea is to first detect specular pixels by computing the uncertainty of depth estimates. Then we combine the use of flexible windows and an adaptively selected subset of images to avoid these specular areas in all the multibaseline stereo images. Even though specularities may exist in the reference image, accurate depth is nevertheless estimated for all pixels. Experiments show that our consideration of specular reflections leads to improved stereo results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We address the problem of accurate depth estimation using multibaseline stereo in the presence of specular reflections. Specular reflections can cause the intensity and color of corresponding points to change dramatically according to different viewpoints, thus producing severe matching errors for various stereo algorithms. In this paper, we propose a new method to deal with this problem by treating specular reflections as occlusions. Our idea is to first detect specular pixels by computing the uncertainty of depth estimates. Then we combine the use of flexible windows and an adaptively selected subset of images to avoid these specular areas in all the multibaseline stereo images. Even though specularities may exist in the reference image, accurate depth is nevertheless estimated for all pixels. Experiments show that our consideration of specular reflections leads to improved stereo results.",
"fno": "169530573",
"keywords": [],
"authors": [
{
"affiliation": "Chinese Academy of Sciences",
"fullName": "Yuanzhen Li",
"givenName": "Yuanzhen",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Asia",
"fullName": "Stephen Lin",
"givenName": "Stephen",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences",
"fullName": "Hanqing Lu",
"givenName": "Hanqing",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research",
"fullName": "Sing Bing Kang",
"givenName": "Sing Bing",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Asia",
"fullName": "Heung-Yeung Shum",
"givenName": "Heung-Yeung",
"surname": "Shum",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-08-01T00:00:00",
"pubType": "proceedings",
"pages": "30573",
"year": "2002",
"issn": "1051-4651",
"isbn": "0-7695-1695-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "169530569",
"articleId": "12OmNwc3wrr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "169530577",
"articleId": "12OmNzlUKh1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2016/0641/0/07477643",
"title": "Unifying diffuse and specular reflections for the photometric stereo problem",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477643/12OmNAsBFHt",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420088",
"title": "A multibaseline stereo system with active illumination and real-time image acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420088/12OmNAtaS4J",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1996/7258/0/72580364",
"title": "3-D Scene Data Recovery using Omnidirectional Multibaseline Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1996/72580364/12OmNrkBwqP",
"parentPublication": {
"id": "proceedings/cvpr/1996/7258/0",
"title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2002/1521/0/15210626",
"title": "Variational Multiframe Stereo in the Presence of Specular Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2002/15210626/12OmNwekjEM",
"parentPublication": {
"id": "proceedings/3dpvt/2002/1521/0",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70421086",
"title": "Stereo in the presence of specular reflection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70421086/12OmNxAlA4c",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294821",
"title": "Generic and real-time detection of specular reflections in images",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294821/12OmNxETaga",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csit/2013/2460/0/06710353",
"title": "Automatic detection and concealment of specular reflections for endoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/csit/2013/06710353/12OmNyKa5Zo",
"parentPublication": {
"id": "proceedings/csit/2013/2460/0",
"title": "2013 Computer Science and Information Technologies (CSIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06803934",
"title": "Second-Order Feed-Forward Renderingfor Specular and Glossy Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06803934/13rRUwInvyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/03/v0253",
"title": "Perturbation Methods for Interactive Specular Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2000/03/v0253/13rRUwj7coZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956564",
"title": "Specular Streaks in Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956564/1IHoO1Bjv1K",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwbcJ4l",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAXxX7s",
"doi": "10.1109/ICCV.2011.6126291",
"title": "Pose estimation from reflections for specular surface recovery",
"normalizedTitle": "Pose estimation from reflections for specular surface recovery",
"abstract": "This paper addresses the problem of estimating the poses of a reference plane in specular shape recovery. Unlike existing methods which require an extra mirror or an extra reference plane and camera, our proposed method recovers the poses of the reference plane directly from its reflections on the specular surface. By establishing reflection correspondences on the reference plane in three distinct poses, our method estimates the poses of the reference plane in two steps. First, by applying a colinearity constraint to the reflection correspondences, a simple closed-form solution is derived for recovering the poses of the reference plane relative to its initial pose. Second, by applying a ray incidence constraint to the incident rays formed by the reflection correspondences and the visual rays cast from the image, a closed-form solution is derived for recovering the poses of the reference plane relative to the camera. The shape of the specular surface then follows. Experimental results on both synthetic and real data are presented, which demonstrate the feasibility and accuracy of our proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper addresses the problem of estimating the poses of a reference plane in specular shape recovery. Unlike existing methods which require an extra mirror or an extra reference plane and camera, our proposed method recovers the poses of the reference plane directly from its reflections on the specular surface. By establishing reflection correspondences on the reference plane in three distinct poses, our method estimates the poses of the reference plane in two steps. First, by applying a colinearity constraint to the reflection correspondences, a simple closed-form solution is derived for recovering the poses of the reference plane relative to its initial pose. Second, by applying a ray incidence constraint to the incident rays formed by the reflection correspondences and the visual rays cast from the image, a closed-form solution is derived for recovering the poses of the reference plane relative to the camera. The shape of the specular surface then follows. Experimental results on both synthetic and real data are presented, which demonstrate the feasibility and accuracy of our proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper addresses the problem of estimating the poses of a reference plane in specular shape recovery. Unlike existing methods which require an extra mirror or an extra reference plane and camera, our proposed method recovers the poses of the reference plane directly from its reflections on the specular surface. By establishing reflection correspondences on the reference plane in three distinct poses, our method estimates the poses of the reference plane in two steps. First, by applying a colinearity constraint to the reflection correspondences, a simple closed-form solution is derived for recovering the poses of the reference plane relative to its initial pose. Second, by applying a ray incidence constraint to the incident rays formed by the reflection correspondences and the visual rays cast from the image, a closed-form solution is derived for recovering the poses of the reference plane relative to the camera. The shape of the specular surface then follows. Experimental results on both synthetic and real data are presented, which demonstrate the feasibility and accuracy of our proposed method.",
"fno": "06126291",
"keywords": [],
"authors": [
{
"affiliation": "The University of Hong Kong, Hong Kong",
"fullName": "Miaomiao Liu",
"givenName": "Miaomiao",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Hong Kong, Hong Kong",
"fullName": "Kwan-Yee K. Wong",
"givenName": "Kwan-Yee K.",
"surname": "Wong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Frankfurt Institute for Advanced Studies, Germany",
"fullName": "Zhenwen Dai",
"givenName": "Zhenwen",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Hong Kong, Hong Kong",
"fullName": "Zhihu Chen",
"givenName": "Zhihu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "579-586",
"year": "2011",
"issn": null,
"isbn": "978-1-4577-1101-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06126277",
"articleId": "12OmNqGiu1t",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06126417",
"articleId": "12OmNyrqzsV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2002/1695/3/169530573",
"title": "Multibaseline Stereo in the Presence of Specular Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169530573/12OmNA0MZ8q",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851b772",
"title": "Mirror Surface Reconstruction under an Uncalibrated Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851b772/12OmNANkogl",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1999/0062/0/00620526",
"title": "Recovery of Shape and Surface Reflectance of Specular Object from Rotation of Light Source",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1999/00620526/12OmNBiygzr",
"parentPublication": {
"id": "proceedings/3dim/1999/0062/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206820",
"title": "3D pose estimation and segmentation using specular cues",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206820/12OmNrMZpHb",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995532",
"title": "Probabilistic simultaneous pose and non-rigid shape recovery",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995532/12OmNwpoFB7",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130330",
"title": "Estimating the unknown poses of a reference plane for specular shape recovery",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130330/12OmNx2QUJd",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05539826",
"title": "Specular surface reconstruction from sparse reflection correspondences",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05539826/12OmNyo1nKD",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420740",
"title": "A theory of specular surface geometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420740/12OmNzAoi2R",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1997/05/i0513",
"title": "3D Surface Estimation and Model Construction From Specular Motion in Image Sequences",
"doi": null,
"abstractUrl": "/journal/tp/1997/05/i0513/13rRUwgyOko",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956564",
"title": "Specular Streaks in Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956564/1IHoO1Bjv1K",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy4r3R2",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMZpHb",
"doi": "10.1109/CVPR.2009.5206820",
"title": "3D pose estimation and segmentation using specular cues",
"normalizedTitle": "3D pose estimation and segmentation using specular cues",
"abstract": "We present a system for fast model-based segmentation and 3D pose estimation of specular objects using appearance based specular features. We use observed (a) specular reflection and (b) specular flow as cues, which are matched against similar cues generated from a CAD model of the object in various poses. We avoid estimating 3D geometry or depths, which is difficult and unreliable for specular scenes. In the first method, the environment map of the scene is utilized to generate a database containing synthesized specular reflections of the object for densely sampled 3D poses. This database is compared with captured images of the scene at run time to locate and estimate the 3D pose of the object. In the second method, specular flows are generated for dense 3D poses as illumination invariant features and are matched to the specular flow of the scene. We incorporate several practical heuristics such as use of saturated/highlight pixels for fast matching and normal selection to minimize the effects of inter-reflections and cluttered backgrounds. Despite its simplicity, our approach is effective in scenes with multiple specular objects, partial occlusions, inter-reflections, cluttered backgrounds and changes in ambient illumination. Experimental results demonstrate the effectiveness of our method for various synthetic and real objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a system for fast model-based segmentation and 3D pose estimation of specular objects using appearance based specular features. We use observed (a) specular reflection and (b) specular flow as cues, which are matched against similar cues generated from a CAD model of the object in various poses. We avoid estimating 3D geometry or depths, which is difficult and unreliable for specular scenes. In the first method, the environment map of the scene is utilized to generate a database containing synthesized specular reflections of the object for densely sampled 3D poses. This database is compared with captured images of the scene at run time to locate and estimate the 3D pose of the object. In the second method, specular flows are generated for dense 3D poses as illumination invariant features and are matched to the specular flow of the scene. We incorporate several practical heuristics such as use of saturated/highlight pixels for fast matching and normal selection to minimize the effects of inter-reflections and cluttered backgrounds. Despite its simplicity, our approach is effective in scenes with multiple specular objects, partial occlusions, inter-reflections, cluttered backgrounds and changes in ambient illumination. Experimental results demonstrate the effectiveness of our method for various synthetic and real objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a system for fast model-based segmentation and 3D pose estimation of specular objects using appearance based specular features. We use observed (a) specular reflection and (b) specular flow as cues, which are matched against similar cues generated from a CAD model of the object in various poses. We avoid estimating 3D geometry or depths, which is difficult and unreliable for specular scenes. In the first method, the environment map of the scene is utilized to generate a database containing synthesized specular reflections of the object for densely sampled 3D poses. This database is compared with captured images of the scene at run time to locate and estimate the 3D pose of the object. In the second method, specular flows are generated for dense 3D poses as illumination invariant features and are matched to the specular flow of the scene. We incorporate several practical heuristics such as use of saturated/highlight pixels for fast matching and normal selection to minimize the effects of inter-reflections and cluttered backgrounds. Despite its simplicity, our approach is effective in scenes with multiple specular objects, partial occlusions, inter-reflections, cluttered backgrounds and changes in ambient illumination. Experimental results demonstrate the effectiveness of our method for various synthetic and real objects.",
"fno": "05206820",
"keywords": [
"Image Segmentation",
"Pose Estimation",
"3 D Pose Estimation",
"Specular Cues Segmentation",
"Fast Model Based Segmentation",
"Specular Reflection",
"3 D Geometry Estimation",
"Database",
"Specular Flows",
"Illumination Invariant Features",
"Cluttered Background",
"Ambient Illumination",
"CAD Model",
"Layout",
"Lighting",
"Reflection",
"Image Segmentation",
"Image Databases",
"Spatial Databases",
"Mirrors",
"Rendering Computer Graphics",
"Geometry",
"Research And Development"
],
"authors": [
{
"affiliation": "Mitsubishi Electric Research Labs (MERL), 201 Broadway, Cambridge, MA, USA",
"fullName": "Ju Yong Chang",
"givenName": "Ju Yong",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT Media Lab, 20 Ames St., Cambridge, MA, USA",
"fullName": "Ramesh Raskar",
"givenName": "Ramesh",
"surname": "Raskar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mitsubishi Electric Research Labs (MERL), 201 Broadway, Cambridge, MA, USA",
"fullName": "Amit Agrawal",
"givenName": "Amit",
"surname": "Agrawal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1706-1713",
"year": "2009",
"issn": "1063-6919",
"isbn": "978-1-4244-3992-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05206819",
"articleId": "12OmNBSSV9f",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05206817",
"articleId": "12OmNC1Y5qz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pmcvg/1999/0271/0/02710039",
"title": "Generation of Diffuse and Specular Appearance from Photometric Images",
"doi": null,
"abstractUrl": "/proceedings-article/pmcvg/1999/02710039/12OmNA0vnOn",
"parentPublication": {
"id": "proceedings/pmcvg/1999/0271/0",
"title": "Photometric Modeling for Computer Vision and Graphics, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2011/1101/0/06126291",
"title": "Pose estimation from reflections for specular surface recovery",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/06126291/12OmNAXxX7s",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130330",
"title": "Estimating the unknown poses of a reference plane for specular shape recovery",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130330/12OmNx2QUJd",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294821",
"title": "Generic and real-time detection of specular reflections in images",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294821/12OmNxETaga",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csit/2013/2460/0/06710353",
"title": "Automatic detection and concealment of specular reflections for endoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/csit/2013/06710353/12OmNyKa5Zo",
"parentPublication": {
"id": "proceedings/csit/2013/2460/0",
"title": "2013 Computer Science and Information Technologies (CSIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995673",
"title": "Using specular highlights as pose invariant features for 2D-3D pose estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995673/12OmNzahc4k",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2002/1521/0/15210356",
"title": "Second order local analysis for 3D reconstruction of specular surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2002/15210356/12OmNzayNvZ",
"parentPublication": {
"id": "proceedings/3dpvt/2002/1521/0",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06803934",
"title": "Second-Order Feed-Forward Renderingfor Specular and Glossy Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06803934/13rRUwInvyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/03/ttp2013030639",
"title": "Recognition Using Specular Highlights",
"doi": null,
"abstractUrl": "/journal/tp/2013/03/ttp2013030639/13rRUyogGBp",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956564",
"title": "Specular Streaks in Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956564/1IHoO1Bjv1K",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCcbEdk",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"acronym": "visapp",
"groupId": "1806906",
"volume": "1",
"displayVolume": "1",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxETaga",
"doi": "",
"title": "Generic and real-time detection of specular reflections in images",
"normalizedTitle": "Generic and real-time detection of specular reflections in images",
"abstract": "In this paper, we propose a generic and efficient method for real-time specular reflections detection in images. The method relies on a new thresholding technique applied in the Hue-Saturation-Value (HSV) color space. A detailed experimental study was conducted in this color space to highlight specular reflections' properties. Current state-of-the-art methods have difficulties with lighting jumps by being too specific or computationally expensive for real-time applications. Our method addresses this problem using the following three steps: an adaptation of the contrast of the image to handle lighting jumps, an automatic thresholding to isolate specular reflections and a post-processing step to further reduce the number of false detections. This method has been compared with the state-of-the-art according to our two proposed experimental protocols based on contours and gravity center and offers fast and accurate results without a priori on the image in real-time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a generic and efficient method for real-time specular reflections detection in images. The method relies on a new thresholding technique applied in the Hue-Saturation-Value (HSV) color space. A detailed experimental study was conducted in this color space to highlight specular reflections' properties. Current state-of-the-art methods have difficulties with lighting jumps by being too specific or computationally expensive for real-time applications. Our method addresses this problem using the following three steps: an adaptation of the contrast of the image to handle lighting jumps, an automatic thresholding to isolate specular reflections and a post-processing step to further reduce the number of false detections. This method has been compared with the state-of-the-art according to our two proposed experimental protocols based on contours and gravity center and offers fast and accurate results without a priori on the image in real-time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a generic and efficient method for real-time specular reflections detection in images. The method relies on a new thresholding technique applied in the Hue-Saturation-Value (HSV) color space. A detailed experimental study was conducted in this color space to highlight specular reflections' properties. Current state-of-the-art methods have difficulties with lighting jumps by being too specific or computationally expensive for real-time applications. Our method addresses this problem using the following three steps: an adaptation of the contrast of the image to handle lighting jumps, an automatic thresholding to isolate specular reflections and a post-processing step to further reduce the number of false detections. This method has been compared with the state-of-the-art according to our two proposed experimental protocols based on contours and gravity center and offers fast and accurate results without a priori on the image in real-time.",
"fno": "07294821",
"keywords": [
"Image Segmentation",
"Brightness",
"Reflection",
"Complexity Theory",
"Image Color Analysis",
"Histograms",
"Robustness",
"Gravity Center",
"Real Time",
"Generic",
"Specular Reflection Detection",
"HSV",
"Saturation",
"Value",
"Contrast",
"Gradient"
],
"authors": [
{
"affiliation": "Vision & Content Engineering Laboratory, CEA LIST, Gif-sur-Yvette, France",
"fullName": "Alexandre Morgand",
"givenName": "Alexandre",
"surname": "Morgand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vision & Content Engineering Laboratory, CEA LIST, Gif-sur-Yvette, France",
"fullName": "Mohamed Tamaazousti",
"givenName": "Mohamed",
"surname": "Tamaazousti",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "visapp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-01-01T00:00:00",
"pubType": "proceedings",
"pages": "274-282",
"year": "2014",
"issn": null,
"isbn": "978-9-8975-8133-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07294820",
"articleId": "12OmNBbJTn2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07294822",
"articleId": "12OmNBTawl4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2002/1695/3/169530573",
"title": "Multibaseline Stereo in the Presence of Specular Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169530573/12OmNA0MZ8q",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pmcvg/1999/0271/0/02710039",
"title": "Generation of Diffuse and Specular Appearance from Photometric Images",
"doi": null,
"abstractUrl": "/proceedings-article/pmcvg/1999/02710039/12OmNA0vnOn",
"parentPublication": {
"id": "proceedings/pmcvg/1999/0271/0",
"title": "Photometric Modeling for Computer Vision and Graphics, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a282",
"title": "Automatic Segmentation of Specular Reflections for Endoscopic Images Based on Sparse and Low-Rank Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a282/12OmNBZYTrB",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2016/2179/0/2179a125",
"title": "A Specular Reflection Suppression Method for Endoscopic Images",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2016/2179a125/12OmNx76TRR",
"parentPublication": {
"id": "proceedings/bigmm/2016/2179/0",
"title": "2016 IEEE Second International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csit/2013/2460/0/06710353",
"title": "Automatic detection and concealment of specular reflections for endoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/csit/2013/06710353/12OmNyKa5Zo",
"parentPublication": {
"id": "proceedings/csit/2013/2460/0",
"title": "2013 Computer Science and Information Technologies (CSIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2009/4534/0/05559012",
"title": "Image-based separation of diffuse and specular reflections using environmental structured illumination",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2009/05559012/12OmNyRPgVx",
"parentPublication": {
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ias/2009/3744/2/3744b725",
"title": "Reducing Specular Reflection Components of Chrome-Plated Surface with Varying Light Direction",
"doi": null,
"abstractUrl": "/proceedings-article/ias/2009/3744b725/12OmNz5JBNv",
"parentPublication": {
"id": "proceedings/ias/2009/3744/2",
"title": "Information Assurance and Security, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1999/0164/2/01640855",
"title": "Estimation of Diffuse and Specular Appearance",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1999/01640855/12OmNz61dqv",
"parentPublication": {
"id": "proceedings/iccv/1999/0164/2",
"title": "Proceedings of the Seventh IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06803934",
"title": "Second-Order Feed-Forward Renderingfor Specular and Glossy Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06803934/13rRUwInvyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09018202",
"title": "Detecting Specular Reflections and Cast Shadows to Estimate Reflectance and Illumination of Dynamic Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09018202/1hN4BrDSVHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy6qfOH",
"title": "2013 Computer Science and Information Technologies (CSIT)",
"acronym": "csit",
"groupId": "1002437",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyKa5Zo",
"doi": "10.1109/CSITechnol.2013.6710353",
"title": "Automatic detection and concealment of specular reflections for endoscopic images",
"normalizedTitle": "Automatic detection and concealment of specular reflections for endoscopic images",
"abstract": "Endoscopy is a minimally invasive medical diagnostic procedure, which is used for observing the surfaces of organs inside human body. The surfaces usually contain specular reflections from distributing light sources, which are visible in endoscopic images and videos. For many computer vision algorithms the highlights created by specular reflections may become significant source of error. In this paper, we propose: (a) a method for segmentation of highlights based on adoptive colour thresholding and contour analyses and (b) a powerful inpainting method, which fill the highlighted regions with information propagated from adjacent areas. The inpainting algorithm conceals the specular reflections with high accuracy and provides visually pleasurable appearance of endoscopic images and videos. The methods are compared with related approaches reported for processing of endoscopic images and the efficiency is demonstrated on large set of endoscopic images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Endoscopy is a minimally invasive medical diagnostic procedure, which is used for observing the surfaces of organs inside human body. The surfaces usually contain specular reflections from distributing light sources, which are visible in endoscopic images and videos. For many computer vision algorithms the highlights created by specular reflections may become significant source of error. In this paper, we propose: (a) a method for segmentation of highlights based on adoptive colour thresholding and contour analyses and (b) a powerful inpainting method, which fill the highlighted regions with information propagated from adjacent areas. The inpainting algorithm conceals the specular reflections with high accuracy and provides visually pleasurable appearance of endoscopic images and videos. The methods are compared with related approaches reported for processing of endoscopic images and the efficiency is demonstrated on large set of endoscopic images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Endoscopy is a minimally invasive medical diagnostic procedure, which is used for observing the surfaces of organs inside human body. The surfaces usually contain specular reflections from distributing light sources, which are visible in endoscopic images and videos. For many computer vision algorithms the highlights created by specular reflections may become significant source of error. In this paper, we propose: (a) a method for segmentation of highlights based on adoptive colour thresholding and contour analyses and (b) a powerful inpainting method, which fill the highlighted regions with information propagated from adjacent areas. The inpainting algorithm conceals the specular reflections with high accuracy and provides visually pleasurable appearance of endoscopic images and videos. The methods are compared with related approaches reported for processing of endoscopic images and the efficiency is demonstrated on large set of endoscopic images.",
"fno": "06710353",
"keywords": [
"Reflection",
"Image Segmentation",
"Algorithm Design And Analysis",
"Histograms",
"Image Color Analysis",
"Discrete Fourier Transforms",
"Computer Vision",
"Medical Image Processing",
"Endoscopic Images",
"Digital Inpainting",
"Concealment Of Specular Reflections",
"Highlights Segmentation",
"Computer Vision"
],
"authors": [
{
"affiliation": "Institute for Informatics and Automation Problems of NAS RA, Yerevan, Armenia",
"fullName": "Gevorg Karapetyan",
"givenName": "Gevorg",
"surname": "Karapetyan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Informatics and Automation Problems of NAS RA, Yerevan, Armenia",
"fullName": "Hakob Sarukhanyan",
"givenName": "Hakob",
"surname": "Sarukhanyan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2460-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06710352",
"articleId": "12OmNBOCWxS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06710354",
"articleId": "12OmNvqmUIp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2002/1695/3/169530573",
"title": "Multibaseline Stereo in the Presence of Specular Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169530573/12OmNA0MZ8q",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pmcvg/1999/0271/0/02710039",
"title": "Generation of Diffuse and Specular Appearance from Photometric Images",
"doi": null,
"abstractUrl": "/proceedings-article/pmcvg/1999/02710039/12OmNA0vnOn",
"parentPublication": {
"id": "proceedings/pmcvg/1999/0271/0",
"title": "Photometric Modeling for Computer Vision and Graphics, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a282",
"title": "Automatic Segmentation of Specular Reflections for Endoscopic Images Based on Sparse and Low-Rank Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a282/12OmNBZYTrB",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206820",
"title": "3D pose estimation and segmentation using specular cues",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206820/12OmNrMZpHb",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2016/2179/0/2179a125",
"title": "A Specular Reflection Suppression Method for Endoscopic Images",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2016/2179a125/12OmNx76TRR",
"parentPublication": {
"id": "proceedings/bigmm/2016/2179/0",
"title": "2016 IEEE Second International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294821",
"title": "Generic and real-time detection of specular reflections in images",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294821/12OmNxETaga",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206624",
"title": "Recovering specular surfaces using curved line images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206624/12OmNxG1yX3",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06803934",
"title": "Second-Order Feed-Forward Renderingfor Specular and Glossy Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06803934/13rRUwInvyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/03/v0253",
"title": "Perturbation Methods for Interactive Specular Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2000/03/v0253/13rRUwj7coZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/1/08005934",
"title": "A Specular Reflection Removal Method for Large Scale Ocean Surface Images",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005934/17D45VN31i5",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/1",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}