data
dict
{ "proceeding": { "id": "12OmNvRU0cK", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNBzAciw", "doi": "10.1109/ISMAR.2016.26", "title": "The Influence of using Augmented Reality on Textbook Support for Learners of Different Learning Styles", "normalizedTitle": "The Influence of using Augmented Reality on Textbook Support for Learners of Different Learning Styles", "abstract": "It has been shown in numerous studies that the application of Augmented Reality (AR) to teaching and learning is beneficial, but determining the reasons behind its effectiveness, and in particular the characteristics of students for whom an AR is best suited, can bring forth new opportunities to integrate adaptive instruction and AR in the future. Through a quasi-experimental research design, our study recruited 66 participants in an 8-week long AR-assisted learning activity, and lag sequential analysis was used to analyze participants' behavior in an AR learning environment. We found that AR was more effective in enhancing the learning gains in elementary school science of learners who prefer a Kinesthetic approach to learning. We hypothesize that these effects are due to the increase in opportunity for hands-on activities, effectively increasing learners' concentration and passion for learning.", "abstracts": [ { "abstractType": "Regular", "content": "It has been shown in numerous studies that the application of Augmented Reality (AR) to teaching and learning is beneficial, but determining the reasons behind its effectiveness, and in particular the characteristics of students for whom an AR is best suited, can bring forth new opportunities to integrate adaptive instruction and AR in the future. 
Through a quasi-experimental research design, our study recruited 66 participants in an 8-week long AR-assisted learning activity, and lag sequential analysis was used to analyze participants' behavior in an AR learning environment. We found that AR was more effective in enhancing the learning gains in elementary school science of learners who prefer a Kinesthetic approach to learning. We hypothesize that these effects are due to the increase in opportunity for hands-on activities, effectively increasing learners' concentration and passion for learning.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "It has been shown in numerous studies that the application of Augmented Reality (AR) to teaching and learning is beneficial, but determining the reasons behind its effectiveness, and in particular the characteristics of students for whom an AR is best suited, can bring forth new opportunities to integrate adaptive instruction and AR in the future. Through a quasi-experimental research design, our study recruited 66 participants in an 8-week long AR-assisted learning activity, and lag sequential analysis was used to analyze participants' behavior in an AR learning environment. We found that AR was more effective in enhancing the learning gains in elementary school science of learners who prefer a Kinesthetic approach to learning. 
We hypothesize that these effects are due to the increase in opportunity for hands-on activities, effectively increasing learners' concentration and passion for learning.", "fno": "3641a107", "keywords": [ "Education", "Augmented Reality", "Visualization", "Systems Operation", "Computers", "Instruments", "Cameras", "K 12 Education", "Augmented Reality", "Computer Assisted Instruction" ], "authors": [ { "affiliation": null, "fullName": "Jia Zhang", "givenName": "Jia", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Amy Ogan", "givenName": "Amy", "surname": "Ogan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tzu-Chien Liu", "givenName": "Tzu-Chien", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yao-Ting Sung", "givenName": "Yao-Ting", "surname": "Sung", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kuo-En Chang", "givenName": "Kuo-En", "surname": "Chang", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "107-114", "year": "2016", "issn": null, "isbn": "978-1-5090-3641-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3641a100", "articleId": "12OmNvFHfKP", "__typename": "AdjacentArticleType" }, "next": { "fno": "3641a115", "articleId": "12OmNqBtiKn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/eitt/2014/4231/0/06982566", "title": "Cooperative Learning by Location-Based Augmented Reality for an Inquiry Learning Course", "doi": null, "abstractUrl": "/proceedings-article/eitt/2014/06982566/12OmNAXPy9z", "parentPublication": { "id": "proceedings/eitt/2014/4231/0", "title": "2014 International Conference 
of Educational Innovation through Technology (EITT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2017/0621/0/0621a539", "title": "Enhancing AR-based Science Exploration through Learning Cycle", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2017/0621a539/12OmNAYGlF1", "parentPublication": { "id": "proceedings/iiai-aai/2017/0621/0", "title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/Ismar-mashd/2015/9628/0/9628a001", "title": "Using Augmented Reality to Promote Homogeneity in Learning Achievement", "doi": null, "abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a001/12OmNwc3wu8", "parentPublication": { "id": "proceedings/Ismar-mashd/2015/9628/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2013/3211/0/3211a385", "title": "Making a Hands-On Display with Augmented Reality Work at a Science Museum", "doi": null, "abstractUrl": "/proceedings-article/sitis/2013/3211a385/12OmNwpXRVO", "parentPublication": { "id": "proceedings/sitis/2013/3211/0", "title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2012/4702/0/4702a113", "title": "Behavioral Patterns and Learning Performance of Collaborative Knowledge Construction on an Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/icalt/2012/4702a113/12OmNwpoFGH", "parentPublication": { "id": "proceedings/icalt/2012/4702/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/digitel/2008/3409/0/3409a215", "title": "Pedagogy Play: Virtual Instructors for Wearable Augmented Reality during Hands-On Learning and Play", "doi": null, "abstractUrl": "/proceedings-article/digitel/2008/3409a215/12OmNxXCGKF", "parentPublication": { "id": "proceedings/digitel/2008/3409/0", "title": "Digital Game and Intelligent Toy Enhanced Learning, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wmute/2012/4662/0/4662a322", "title": "AR-based Remote Video Learning System", "doi": null, "abstractUrl": "/proceedings-article/wmute/2012/4662a322/12OmNy2Jt8I", "parentPublication": { "id": "proceedings/wmute/2012/4662/0", "title": "IEEE International Conference on Wireless, Mobile, and Ubiquitous Technology in Education", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223407", "title": "Optical see-through HUDs effect on depth judgments of real world objects", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223407/12OmNyRg4pk", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836525", "title": "AR-Based Learning and AR Guides as Strategy in Two-Phase Learning Enhancement: A Case Study", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836525/12OmNz2C1pd", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a020", "title": "Design of an AR-Based System for Group Piano Learning", "doi": null, 
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a020/1gysji9xPlm", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyKJixw", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "acronym": "Ismar-mashd", "groupId": "1002953", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNwc3wu8", "doi": "10.1109/ISMAR-MASHD.2015.17", "title": "Using Augmented Reality to Promote Homogeneity in Learning Achievement", "normalizedTitle": "Using Augmented Reality to Promote Homogeneity in Learning Achievement", "abstract": "The issue of individual differences among learners has thus far received the most attention from education researchers. Many teachers strive to develop a learning strategy with significant effect on the majority of learners. Through literature analysis, this study found that not only could Augmented Reality (AR) improve learning effectiveness, it could also reduce the impact of individual differences on learning outcomes. Therefore, this study designed a set of AR-aided teaching systems to help teachers supplement curriculum content using AR. Sixty-six participants from elementary schools were involved in this study. The results confirmed that AR can help learners to achieve better learning outcomes, and can effectively improve learning achievement in non-high-scoring groups, enabling them to perform closer to those in high-scoring groups and reducing the gap in overall learning level. This study also discovered that technological barriers could reduce the benefits of AR in teaching contexts. Authoring tools with low operating thresholds are valuable for AR-aided teaching systems.", "abstracts": [ { "abstractType": "Regular", "content": "The issue of individual differences among learners has thus far received the most attention from education researchers. Many teachers strive to develop a learning strategy with significant effect on the majority of learners. 
Through literature analysis, this study found that not only could Augmented Reality (AR) improve learning effectiveness, it could also reduce the impact of individual differences on learning outcomes. Therefore, this study designed a set of AR-aided teaching systems to help teachers supplement curriculum content using AR. Sixty-six participants from elementary schools were involved in this study. The results confirmed that AR can help learners to achieve better learning outcomes, and can effectively improve learning achievement in non-high-scoring groups, enabling them to perform closer to those in high-scoring groups and reducing the gap in overall learning level. This study also discovered that technological barriers could reduce the benefits of AR in teaching contexts. Authoring tools with low operating thresholds are valuable for AR-aided teaching systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The issue of individual differences among learners has thus far received the most attention from education researchers. Many teachers strive to develop a learning strategy with significant effect on the majority of learners. Through literature analysis, this study found that not only could Augmented Reality (AR) improve learning effectiveness, it could also reduce the impact of individual differences on learning outcomes. Therefore, this study designed a set of AR-aided teaching systems to help teachers supplement curriculum content using AR. Sixty-six participants from elementary schools were involved in this study. The results confirmed that AR can help learners to achieve better learning outcomes, and can effectively improve learning achievement in non-high-scoring groups, enabling them to perform closer to those in high-scoring groups and reducing the gap in overall learning level. This study also discovered that technological barriers could reduce the benefits of AR in teaching contexts. 
Authoring tools with low operating thresholds are valuable for AR-aided teaching systems.", "fno": "9628a001", "keywords": [ "Education", "Standards", "Augmented Reality", "Computer Crashes", "Computers", "Correlation", "Visualization", "K 12 Education", "Augmented Reality", "Computer Assisted Instruction" ], "authors": [ { "affiliation": null, "fullName": "Jia Zhang", "givenName": "Jia", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tzu-Chien Liu", "givenName": "Tzu-Chien", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yao-Ting Sung", "givenName": "Yao-Ting", "surname": "Sung", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kuo-En Chang", "givenName": "Kuo-En", "surname": "Chang", "__typename": "ArticleAuthorType" } ], "idPrefix": "Ismar-mashd", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "1-5", "year": "2015", "issn": null, "isbn": "978-1-4673-9628-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9628z017", "articleId": "12OmNyrqzru", "__typename": "AdjacentArticleType" }, "next": { "fno": "9628a006", "articleId": "12OmNC3Xhjl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2016/3641/0/3641a107", "title": "The Influence of using Augmented Reality on Textbook Support for Learners of Different Learning Styles", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a107/12OmNBzAciw", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2013/5009/0/5009a439", "title": 
"VECAR: Virtual English Classroom with Markerless Augmented Reality and Intuitive Gesture Interaction", "doi": null, "abstractUrl": "/proceedings-article/icalt/2013/5009a439/12OmNwJPMVw", "parentPublication": { "id": "proceedings/icalt/2013/5009/0", "title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2012/4702/0/4702a113", "title": "Behavioral Patterns and Learning Performance of Collaborative Knowledge Construction on an Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/icalt/2012/4702a113/12OmNwpoFGH", "parentPublication": { "id": "proceedings/icalt/2012/4702/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mue/2011/4470/0/4470a246", "title": "Body Language and Augmented Reality Learning Environment", "doi": null, "abstractUrl": "/proceedings-article/mue/2011/4470a246/12OmNxXCGHO", "parentPublication": { "id": "proceedings/mue/2011/4470/0", "title": "Multimedia and Ubiquitous Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/digitel/2008/3409/0/3409a215", "title": "Pedagogy Play: Virtual Instructors for Wearable Augmented Reality during Hands-On Learning and Play", "doi": null, "abstractUrl": "/proceedings-article/digitel/2008/3409a215/12OmNxXCGKF", "parentPublication": { "id": "proceedings/digitel/2008/3409/0", "title": "Digital Game and Intelligent Toy Enhanced Learning, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiaiaai/2014/4174/0/06913314", "title": "A Mobile Augmented Reality Based Scaffolding Platform for Outdoor Fieldtrip Learning", "doi": null, "abstractUrl": 
"/proceedings-article/iiaiaai/2014/06913314/12OmNyNQSAi", "parentPublication": { "id": "proceedings/iiaiaai/2014/4174/0", "title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/Ismar-mashd/2014/6887/0/06935440", "title": "[Poster] AIBLE: An inquiry-based augmented reality environment for teaching astronomical phenomena", "doi": null, "abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935440/12OmNyprnql", "parentPublication": { "id": "proceedings/Ismar-mashd/2014/6887/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2019/1198/0/119800a389", "title": "Implementation of Augmented Reality Globe in Teaching-Learning Environment", "doi": null, "abstractUrl": "/proceedings-article/mipr/2019/119800a389/19wB38QGJS8", "parentPublication": { "id": "proceedings/mipr/2019/1198/0", "title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2021/5841/0/584100b021", "title": "The use of Augmented Reality to promote learning in Science Laboratories", "doi": null, "abstractUrl": "/proceedings-article/csci/2021/584100b021/1EpKY0rAQDK", "parentPublication": { "id": "proceedings/csci/2021/5841/0", "title": "2021 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cipae/2021/2665/0/266500a018", "title": "Research on the Application of Augmented Reality Technology in College English Teaching", "doi": null, "abstractUrl": "/proceedings-article/cipae/2021/266500a018/1yQAUh3DQm4", 
"parentPublication": { "id": "proceedings/cipae/2021/2665/0", "title": "2021 International Conference on Computers, Information Processing and Advanced Education (CIPAE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19m3yLbYQdq", "title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)", "acronym": "iiai-aai", "groupId": "1801921", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19m3FUknh60", "doi": "10.1109/IIAI-AAI.2018.00200", "title": "A Study of Augmented Reality in Learning, Memory and Learning Motivation: A Case Study of Practical Writing in Middle School Life", "normalizedTitle": "A Study of Augmented Reality in Learning, Memory and Learning Motivation: A Case Study of Practical Writing in Middle School Life", "abstract": "With the development of Augmented Reality (AR) technology, more and more research studies have explored the educational impact of AR technology applications. However, in the field of practical writing, few studies discussed the effectiveness of AR technology in Chinese idiom (inscription) learning. In this study we developed a Chinese idiom learning system, named Chinese Idioms Learning Card (CILC), which combined AR technology and game-based learning design. In the current state of the study, the proposed system has been developed and continues to address usability based on the advice of linguistic experts and students. Two classes of junior high school students in Taiwan will participate in continuous experiments. In the future, a mixed study will be conducted to explore effectiveness of students' self-learning in practical writing with the help of using this AR learning system.", "abstracts": [ { "abstractType": "Regular", "content": "With the development of Augmented Reality (AR) technology, more and more research studies have explored the educational impact of AR technology applications. However, in the field of practical writing, few studies discussed the effectiveness of AR technology in Chinese idiom (inscription) learning. 
In this study we developed a Chinese idiom learning system, named Chinese Idioms Learning Card (CILC), which combined AR technology and game-based learning design. In the current state of the study, the proposed system has been developed and continues to address usability based on the advice of linguistic experts and students. Two classes of junior high school students in Taiwan will participate in continuous experiments. In the future, a mixed study will be conducted to explore effectiveness of students' self-learning in practical writing with the help of using this AR learning system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the development of Augmented Reality (AR) technology, more and more research studies have explored the educational impact of AR technology applications. However, in the field of practical writing, few studies discussed the effectiveness of AR technology in Chinese idiom (inscription) learning. In this study we developed a Chinese idiom learning system, named Chinese Idioms Learning Card (CILC), which combined AR technology and game-based learning design. In the current state of the study, the proposed system has been developed and continues to address usability based on the advice of linguistic experts and students. Two classes of junior high school students in Taiwan will participate in continuous experiments. 
In the future, a mixed study will be conducted to explore effectiveness of students' self-learning in practical writing with the help of using this AR learning system.", "fno": "744701a958", "keywords": [ "Augmented Reality", "Computer Aided Instruction", "Serious Games Computing", "Practical Writing", "Chinese Idiom Learning System", "Junior High School Students", "Mixed Study", "AR Learning System", "Middle School Life", "Educational Impact", "AR Technology Applications", "Augmented Reality Technology", "Game Based Learning Design", "Taiwan", "Writing", "Games", "Education", "Learning Systems", "Augmented Reality", "Information Management", "Animation", "Augmented Reality", "Practical Writing", "Learning Card" ], "authors": [ { "affiliation": "Dept. of Inf. Manage., Chien Hsin Univ. of Sci. & Technol., Chungli, Taiwan", "fullName": "Fang-Chuan Ou Yang", "givenName": "Fang-Chuan", "surname": "Ou Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Inf. Manage., Chien Hsin Univ. of Sci. & Technol., Chungli, Taiwan", "fullName": "Wan-Ting Huang", "givenName": "Wan-Ting", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Inf. 
Manage., Chung Yuan Christian Univ., Chungli, Taiwan", "fullName": "Ching-Jung Liao", "givenName": "Ching-Jung", "surname": "Liao", "__typename": "ArticleAuthorType" } ], "idPrefix": "iiai-aai", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-07-01T00:00:00", "pubType": "proceedings", "pages": "958-959", "year": "2018", "issn": null, "isbn": "978-1-5386-7447-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "744701a956", "articleId": "19m3DMc6e40", "__typename": "AdjacentArticleType" }, "next": { "fno": "744701a960", "articleId": "19m3EGrNzgI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icalt/2015/7334/0/7334a132", "title": "Augmented Reality Laboratory for High School Electrochemistry Course", "doi": null, "abstractUrl": "/proceedings-article/icalt/2015/7334a132/12OmNqBbHAA", "parentPublication": { "id": "proceedings/icalt/2015/7334/0", "title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892a031", "title": "Manipulating Virtual Objects with Your Hands: A Case Study on Applying Desktop Augmented Reality at the Primary School", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892a031/12OmNrMHOpd", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wmute/2012/4662/0/4662a322", "title": "AR-based Remote Video Learning System", "doi": null, "abstractUrl": "/proceedings-article/wmute/2012/4662a322/12OmNy2Jt8I", "parentPublication": { "id": "proceedings/wmute/2012/4662/0", "title": "IEEE International 
Conference on Wireless, Mobile, and Ubiquitous Technology in Education", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802077", "title": "An AR edutainment system supporting bone anatomy learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802077/12OmNylKAKS", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2014/4038/0/4038a576", "title": "Gremlings in My Mirror: An Inclusive AR-Enriched Videogame for Logical Math Skills Learning", "doi": null, "abstractUrl": "/proceedings-article/icalt/2014/4038a576/12OmNzXWZFy", "parentPublication": { "id": "proceedings/icalt/2014/4038/0", "title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtcsa/2011/1118/1/06029834", "title": "Augmented Reality Go: Extending Traditional Game Play with Interactive Self-Learning Support", "doi": null, "abstractUrl": "/proceedings-article/rtcsa/2011/06029834/12OmNzb7Zpy", "parentPublication": { "id": "proceedings/rtcsa/2011/1118/1", "title": "2011 IEEE 17th International Conference on Embedded and Real-Time Computing Systems and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08457524", "title": "ARbis Pictus: A Study of Vocabulary Learning with Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2018/11/08457524/14M3E0wp25a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2022/9519/0/951900a349", "title": "The Effect of Role Assignment on Students’ Collaborative 
Inquiry-based Learning in Augmented Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/icalt/2022/951900a349/1FUUe1UnEGc", "parentPublication": { "id": "proceedings/icalt/2022/9519/0", "title": "2022 International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2020/6090/0/09155919", "title": "Effects of Augmented Reality Assisted Learning Materials on Students’ Learning Outcomes", "doi": null, "abstractUrl": "/proceedings-article/icalt/2020/09155919/1m1j7NOETSg", "parentPublication": { "id": "proceedings/icalt/2020/6090/0", "title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eitt/2020/9171/0/917100a229", "title": "Research on Mobile AR Language Learning Environment Based on Virtual Avatar", "doi": null, "abstractUrl": "/proceedings-article/eitt/2020/917100a229/1qyxq6q6MPS", "parentPublication": { "id": "proceedings/eitt/2020/9171/0", "title": "2020 Ninth International Conference of Educational Innovation through Technology (EITT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1FUU5pAuu8E", "title": "2022 International Conference on Advanced Learning Technologies (ICALT)", "acronym": "icalt", "groupId": "1000009", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1FUUfSsF2es", "doi": "10.1109/ICALT55010.2022.00083", "title": "Using Thematic English Learning and Augmented Reality to Enhance Vocabulary Learning Motivation and Enjoyment of Elementary School Students", "normalizedTitle": "Using Thematic English Learning and Augmented Reality to Enhance Vocabulary Learning Motivation and Enjoyment of Elementary School Students", "abstract": "Nowadays, vocabulary learning for second-grade students can be enhanced with appropriate teaching techniques, which can also improve their learning motivation and enjoyment at the same time. The use of Augmented Reality- Assisted Language Learning (ARALL) has been proven to be an innovative way to enhance the learning achievements of lower graders. Therefore, this study aimed to find out whether applying ARALL can improve the ability of lower-grade students in English vocabulary learning while also enhancing their learning motivation and enjoyment. Twenty-three lower-grade students participated in this study, and a mixed research method was adopted for data analysis. The results showed that the students did improve their vocabulary, and they enjoyed this ARALL instruction. Furthermore, through AR thematic English teaching methods, the students could remember the vocabulary rapidly and enhance their own confidence and enjoyment in learning English.", "abstracts": [ { "abstractType": "Regular", "content": "Nowadays, vocabulary learning for second-grade students can be enhanced with appropriate teaching techniques, which can also improve their learning motivation and enjoyment at the same time. 
The use of Augmented Reality- Assisted Language Learning (ARALL) has been proven to be an innovative way to enhance the learning achievements of lower graders. Therefore, this study aimed to find out whether applying ARALL can improve the ability of lower-grade students in English vocabulary learning while also enhancing their learning motivation and enjoyment. Twenty-three lower-grade students participated in this study, and a mixed research method was adopted for data analysis. The results showed that the students did improve their vocabulary, and they enjoyed this ARALL instruction. Furthermore, through AR thematic English teaching methods, the students could remember the vocabulary rapidly and enhance their own confidence and enjoyment in learning English.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Nowadays, vocabulary learning for second-grade students can be enhanced with appropriate teaching techniques, which can also improve their learning motivation and enjoyment at the same time. The use of Augmented Reality- Assisted Language Learning (ARALL) has been proven to be an innovative way to enhance the learning achievements of lower graders. Therefore, this study aimed to find out whether applying ARALL can improve the ability of lower-grade students in English vocabulary learning while also enhancing their learning motivation and enjoyment. Twenty-three lower-grade students participated in this study, and a mixed research method was adopted for data analysis. The results showed that the students did improve their vocabulary, and they enjoyed this ARALL instruction. 
Furthermore, through AR thematic English teaching methods, the students could remember the vocabulary rapidly and enhance their own confidence and enjoyment in learning English.", "fno": "951900a256", "keywords": [ "Augmented Reality", "Computer Aided Instruction", "Data Analysis", "Linguistics", "Teaching", "Vocabulary", "Elementary School Students", "Second Grade Students", "ARALL", "Lower Grade Students", "English Vocabulary", "English Teaching Methods", "Thematic English Learning", "Vocabulary Learning Motivation", "Augmented Reality Assisted Language Learning", "Data Analysis", "Vocabulary", "Data Analysis", "Education", "Augmented Reality", "Augmented Reality Assisted", "Thematic English Learning", "Lower Grade Pupil", "Learning Motivation", "Learning Enjoyment" ], "authors": [ { "affiliation": "Asia University,Department of Foreign Languages and Literature,Taichung City,Taiwan", "fullName": "Hsueh-Ching Chuang", "givenName": "Hsueh-Ching", "surname": "Chuang", "__typename": "ArticleAuthorType" }, { "affiliation": "Asia University,Department of Foreign Languages and Literature,Taichung City,Taiwan", "fullName": "Venny Gunawan", "givenName": "Venny", "surname": "Gunawan", "__typename": "ArticleAuthorType" }, { "affiliation": "Asia University,Department of Foreign Languages and Literature,Taichung City,Taiwan", "fullName": "Wen-Chi Wu", "givenName": "Wen-Chi", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Asia University,Department of Foreign Languages and Literature,Taichung City,Taiwan", "fullName": "Kun-Liang Chuang", "givenName": "Kun-Liang", "surname": "Chuang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icalt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "256-258", "year": "2022", "issn": null, "isbn": "978-1-6654-9519-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "951900a253", "articleId": "1FUU86Y9fBm", "__typename": "AdjacentArticleType" }, "next": { "fno": "951900a259", "articleId": "1FUUkF8jWX6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icalt/2016/9041/0/9041a307", "title": "The Acceptance of Learning Augmented Reality Environments: A Case Study", "doi": null, "abstractUrl": "/proceedings-article/icalt/2016/9041a307/12OmNARiM5M", "parentPublication": { "id": "proceedings/icalt/2016/9041/0", "title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2016/8985/0/8985a381", "title": "Computer Assisted Vocabulary Learning: Examining English Language Learners' Vocabulary Notebooks", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2016/8985a381/12OmNCfjeBX", "parentPublication": { "id": "proceedings/iiai-aai/2016/8985/0", "title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2015/9957/0/07373913", "title": "Development of a Contextual Game for Improving English Vocabulary Learning Performance of Elementary School Students in Taiwan", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2015/07373913/12OmNvDZF7N", "parentPublication": { "id": "proceedings/iiai-aai/2015/9957/0", "title": "2015 IIAI 4th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2017/3870/0/3870a365", "title": "Comparing the Judgment and Accuracy of English Vocabulary Tests Relevant to Metamemory Practice", "doi": null, "abstractUrl": "/proceedings-article/icalt/2017/3870a365/12OmNzhnaft", 
"parentPublication": { "id": "proceedings/icalt/2017/3870/0", "title": "2017 IEEE 17th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicse/2015/0454/0/0454a262", "title": "An Integration of Etymology with Vocabulary Teaching", "doi": null, "abstractUrl": "/proceedings-article/icicse/2015/0454a262/12OmNzwHv8m", "parentPublication": { "id": "proceedings/icicse/2015/0454/0", "title": "2015 Eighth International Conference on Internet Computing for Science and Engineering (ICICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08457524", "title": "ARbis Pictus: A Study of Vocabulary Learning with Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2018/11/08457524/14M3E0wp25a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2018/7447/0/744701a164", "title": "An English Vocabulary Learning APP with Self-Regulated Learning Mechanism for Promoting Learning Performance and Motivation", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2018/744701a164/19m3FxbuYzS", "parentPublication": { "id": "proceedings/iiai-aai/2018/7447/0", "title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itei/2021/8050/0/805000a167", "title": "A study of frequency-based and mobile terminal-based deep learning of college English vocabulary", "doi": null, "abstractUrl": "/proceedings-article/itei/2021/805000a167/1CzeJ1uZTNK", "parentPublication": { "id": "proceedings/itei/2021/8050/0", "title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a150", "title": "Words in Kitchen: An Instance of Leveraging Virtual Reality Technology to Learn Vocabulary", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a150/1gysnPldm9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09286680", "title": "New Media and Space: An Empirical Study of Learning and Enjoyment Through Museum Hybrid Space", "doi": null, "abstractUrl": "/journal/tg/2022/08/09286680/1por35qBdQs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0NXcPJGo", "doi": "10.1109/VR.2019.8797804", "title": "Semantic Labeling and Object Registration for Augmented Reality Language Learning", "normalizedTitle": "Semantic Labeling and Object Registration for Augmented Reality Language Learning", "abstract": "We propose an Augmented Reality vocabulary learning interface in which objects in a user's environment are automatically recognized and labeled in a foreign language. Using AR for language learning in this manner is still impractical for a number of reasons. Scalable object recognition and consistent labeling of objects is still a significant challenge, and interaction with arbitrary physical objects in AR scenes has consequently not been well explored. To help address these challenges, we present a system that utilizes real-time object recognition to perform semantic labeling and object registration in Augmented Reality. We discuss its implementation, our motivations in designing it, and how it can be applied to AR language learning applications.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an Augmented Reality vocabulary learning interface in which objects in a user's environment are automatically recognized and labeled in a foreign language. Using AR for language learning in this manner is still impractical for a number of reasons. Scalable object recognition and consistent labeling of objects is still a significant challenge, and interaction with arbitrary physical objects in AR scenes has consequently not been well explored. To help address these challenges, we present a system that utilizes real-time object recognition to perform semantic labeling and object registration in Augmented Reality. 
We discuss its implementation, our motivations in designing it, and how it can be applied to AR language learning applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an Augmented Reality vocabulary learning interface in which objects in a user's environment are automatically recognized and labeled in a foreign language. Using AR for language learning in this manner is still impractical for a number of reasons. Scalable object recognition and consistent labeling of objects is still a significant challenge, and interaction with arbitrary physical objects in AR scenes has consequently not been well explored. To help address these challenges, we present a system that utilizes real-time object recognition to perform semantic labeling and object registration in Augmented Reality. We discuss its implementation, our motivations in designing it, and how it can be applied to AR language learning applications.", "fno": "08797804", "keywords": [ "Augmented Reality", "Computer Aided Instruction", "Learning Artificial Intelligence", "Object Recognition", "User Interfaces", "Virtual Reality Languages", "Vocabulary", "Real Time Object Recognition", "Semantic Labeling", "Object Registration", "Augmented Reality Language Learning", "Foreign Language", "Scalable Object Recognition", "Arbitrary Physical Objects", "Augmented Reality Vocabulary Learning Interface", "AR Language Learning Applications", "User Environment", "Calibration", "Gaze Tracking", "Labeling", "Augmented Reality", "Object Recognition", "Three Dimensional Displays", "Cameras", "Human Centered Computing X 2014 Mixed And Augmented Reality", "Theory And Algorithms For Application Domains X 2014 Semi Supervised Learning" ], "authors": [ { "affiliation": "University of California, Santa Barbara", "fullName": "Brandon Huynh", "givenName": "Brandon", "surname": "Huynh", "__typename": "ArticleAuthorType" }, { "affiliation": "Osaka University", "fullName": "Jason Orlosky", "givenName": 
"Jason", "surname": "Orlosky", "__typename": "ArticleAuthorType" }, { "affiliation": "University of California, Santa Barbara", "fullName": "Tobias Höllerer", "givenName": "Tobias", "surname": "Höllerer", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "986-987", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798248", "articleId": "1cJ1grgtt1m", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798267", "articleId": "1cJ0RUiTm8g", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/kam/2009/3888/3/3888c091", "title": "Ubiquitous Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888c091/12OmNCcKQOw", "parentPublication": { "id": "proceedings/kam/2009/3888/1", "title": "Knowledge Acquisition and Modeling, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2008/3259/0/3259a037", "title": "Introduction of Physics Simulation in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2008/3259a037/12OmNvB9Fyb", "parentPublication": { "id": "proceedings/isuvr/2008/3259/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811001", "title": "Explosion Diagrams in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811001/12OmNwE9OR0", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/3dui/2006/0225/0/02250135", "title": "SSIML/AR: A Visual Language for the Abstract Specification of Augmented Reality User Interfaces", "doi": null, "abstractUrl": "/proceedings-article/3dui/2006/02250135/12OmNx1qV0e", "parentPublication": { "id": "proceedings/3dui/2006/0225/0", "title": "3D User Interfaces (3DUI'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mue/2011/4470/0/4470a246", "title": "Body Language and Augmented Reality Learning Environment", "doi": null, "abstractUrl": "/proceedings-article/mue/2011/4470a246/12OmNxXCGHO", "parentPublication": { "id": "proceedings/mue/2011/4470/0", "title": "Multimedia and Ubiquitous Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08457524", "title": "ARbis Pictus: A Study of Vocabulary Learning with Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2018/11/08457524/14M3E0wp25a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699263", "title": "Design and Calibration of an Augmented Reality Haploscope", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699263/19F1OYkEmWs", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699304", "title": "Addressing the Occlusion Problem in Augmented Reality Environments with Phantom Hollow Objects", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699304/19F1T4QjgOY", "parentPublication": { "id": 
"proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798358", "title": "In-Situ Labeling for Augmented Reality Language Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798358/1cJ0VFN6eIw", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a384", "title": "An Empirical Study of Size Discrimination in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a384/1yeQWO0csfe", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyshXRzHpK", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gysnPldm9O", "doi": "10.1109/ISMAR-Adjunct.2019.00-59", "title": "Words in Kitchen: An Instance of Leveraging Virtual Reality Technology to Learn Vocabulary", "normalizedTitle": "Words in Kitchen: An Instance of Leveraging Virtual Reality Technology to Learn Vocabulary", "abstract": "Vocabulary is an essential part in second language learning. The traditional way of learning vocabulary is dull and boring in class with thin context. Nevertheless, a rich context for memorizing words is necessary according to the situated learning theory. Thanks to virtual reality (VR) technology, such a rich context can be achieved by creating a virtual simulation scenario. \"Words In Kitchen\" is a VR system designed as an educational tool for second language learners to learn vocabulary. With the help of the proposed system, a user can interact with virtual objects in the virtual scene to learn the spelling and pronunciation of specific vocabularies. We aim to make full use of immersion and interaction of VR and propose a prototype of vocabulary learning in VR, so that the system can promote learners' common vocabulary learning as a supplement to class and textbooks. A pilot study was conducted to identify its usability and enjoyment and the result indicated that the system could help remember words and stimulate interests.", "abstracts": [ { "abstractType": "Regular", "content": "Vocabulary is an essential part in second language learning. The traditional way of learning vocabulary is dull and boring in class with thin context. Nevertheless, a rich context for memorizing words is necessary according to the situated learning theory. 
Thanks to virtual reality (VR) technology, such a rich context can be achieved by creating a virtual simulation scenario. \"Words In Kitchen\" is a VR system designed as an educational tool for second language learners to learn vocabulary. With the help of the proposed system, a user can interact with virtual objects in the virtual scene to learn the spelling and pronunciation of specific vocabularies. We aim to make full use of immersion and interaction of VR and propose a prototype of vocabulary learning in VR, so that the system can promote learners' common vocabulary learning as a supplement to class and textbooks. A pilot study was conducted to identify its usability and enjoyment and the result indicated that the system could help remember words and stimulate interests.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Vocabulary is an essential part in second language learning. The traditional way of learning vocabulary is dull and boring in class with thin context. Nevertheless, a rich context for memorizing words is necessary according to the situated learning theory. Thanks to virtual reality (VR) technology, such a rich context can be achieved by creating a virtual simulation scenario. \"Words In Kitchen\" is a VR system designed as an educational tool for second language learners to learn vocabulary. With the help of the proposed system, a user can interact with virtual objects in the virtual scene to learn the spelling and pronunciation of specific vocabularies. We aim to make full use of immersion and interaction of VR and propose a prototype of vocabulary learning in VR, so that the system can promote learners' common vocabulary learning as a supplement to class and textbooks. 
A pilot study was conducted to identify its usability and enjoyment and the result indicated that the system could help remember words and stimulate interests.", "fno": "476500a150", "keywords": [ "Computer Aided Instruction", "Linguistics", "Virtual Reality", "Vocabulary", "Pronunciation", "Spelling", "Educational Tool", "Words Memorization", "Words In Kitchen", "Second Language Learning", "Virtual Reality Technology", "Vocabulary Learning", "Virtual Scene", "Virtual Objects", "Language Learners", "VR System", "Virtual Simulation Scenario", "Situated Learning Theory", "Vocabulary", "Task Analysis", "Education", "Games", "Augmented Reality", "Tools", "Language Learning Education Virtual Reality" ], "authors": [ { "affiliation": "Beijing Institute of Technology", "fullName": "Tianyu Jia", "givenName": "Tianyu", "surname": "Jia", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Institute of Technology", "fullName": "Yue Liu", "givenName": "Yue", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "150-155", "year": "2019", "issn": null, "isbn": "978-1-7281-4765-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "476500a144", "articleId": "1gysoyOrm2A", "__typename": "AdjacentArticleType" }, "next": { "fno": "476500a156", "articleId": "1gysiAZnF16", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iset/2017/3031/0/08005387", "title": "Using Augmented Reality to Teach Kindergarten Students English Vocabulary", "doi": null, "abstractUrl": "/proceedings-article/iset/2017/08005387/12OmNCdBDT9", "parentPublication": { "id": "proceedings/iset/2017/3031/0", "title": "2017 International Symposium on Educational 
Technology (ISET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2013/5009/0/5009a235", "title": "Game-Based Micro-learning Approach for Language Vocabulary Acquisition Using LingoSnacks", "doi": null, "abstractUrl": "/proceedings-article/icalt/2013/5009a235/12OmNwDAC7F", "parentPublication": { "id": "proceedings/icalt/2013/5009/0", "title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/caapwd/1992/2730/0/00217405", "title": "Computer-assisted early vocabulary development", "doi": null, "abstractUrl": "/proceedings-article/caapwd/1992/00217405/12OmNxWcHq5", "parentPublication": { "id": "proceedings/caapwd/1992/2730/0", "title": "Proceedings of the Johns Hopkins National Search for Computing Applications to Assist Persons with Disabilities", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2002/7402/1/05743875", "title": "Transcription of out-of-vocabulary words in large vocabulary speech recognition based on phoneme-to-grapheme conversion", "doi": null, "abstractUrl": "/proceedings-article/icassp/2002/05743875/12OmNzEmFEf", "parentPublication": { "id": "proceedings/icassp/2002/7402/1", "title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08457524", "title": "ARbis Pictus: A Study of Vocabulary Learning with Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2018/11/08457524/14M3E0wp25a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a267", "title": "Improving 
Language Learning by an Interact-to-Learn Desktop VR Application: A Case Study with Peinture", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a267/1CJeH98Mvg4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2022/9519/0/951900a256", "title": "Using Thematic English Learning and Augmented Reality to Enhance Vocabulary Learning Motivation and Enjoyment of Elementary School Students", "doi": null, "abstractUrl": "/proceedings-article/icalt/2022/951900a256/1FUUfSsF2es", "parentPublication": { "id": "proceedings/icalt/2022/9519/0", "title": "2022 International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872027", "title": "VocabulARy: Learning Vocabulary in AR Supported by Keyword Visualisations", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872027/1GhRUPatDmU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icekim/2022/1666/0/166600a442", "title": "An Empirical Study of the Co-relations Between Dictionary Use and Vocabulary Acquisition", "doi": null, "abstractUrl": "/proceedings-article/icekim/2022/166600a442/1KpBw1K2OVW", "parentPublication": { "id": "proceedings/icekim/2022/1666/0", "title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2018/1360/0/136000a590", "title": "Improvement of Interactive Learning Support System with Adaptive Vocabulary Lists", "doi": null, "abstractUrl": 
"/proceedings-article/csci/2018/136000a590/1gjRoLfskZW", "parentPublication": { "id": "proceedings/csci/2018/1360/0", "title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirq", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNAXglOH", "doi": "10.1109/ISMAR.2012.6402595", "title": "Depth perception control by hiding displayed images based on car vibration for monocular head-up display", "normalizedTitle": "Depth perception control by hiding displayed images based on car vibration for monocular head-up display", "abstract": "We have developed a novel depth perception control method for a monocular head-up display (HUD) in a car. However, it is difficult to achieve an accurate depth perception in the real world because of car vibration. To resolve this problem, we focus on a property that people complement hidden images by previous continuous observed images. We hide the image on the HUD when the car is vibrated. We aim to point at the accurate depth position by using HUD images with having users compliment the hidden image positions based on the continuous images before car vibration. We developed a car which detects big vibration by an acceleration sensor and is equipped with our monocular HUD. Our method pointed at the depth position within a 3.4 [m] error, which was 2 times more accurate than the previous method does.", "abstracts": [ { "abstractType": "Regular", "content": "We have developed a novel depth perception control method for a monocular head-up display (HUD) in a car. However, it is difficult to achieve an accurate depth perception in the real world because of car vibration. To resolve this problem, we focus on a property that people complement hidden images by previous continuous observed images. We hide the image on the HUD when the car is vibrated. 
We aim to point at the accurate depth position by using HUD images with having users compliment the hidden image positions based on the continuous images before car vibration. We developed a car which detects big vibration by an acceleration sensor and is equipped with our monocular HUD. Our method pointed at the depth position within a 3.4 [m] error, which was 2 times more accurate than the previous method does.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We have developed a novel depth perception control method for a monocular head-up display (HUD) in a car. However, it is difficult to achieve an accurate depth perception in the real world because of car vibration. To resolve this problem, we focus on a property that people complement hidden images by previous continuous observed images. We hide the image on the HUD when the car is vibrated. We aim to point at the accurate depth position by using HUD images with having users compliment the hidden image positions based on the continuous images before car vibration. We developed a car which detects big vibration by an acceleration sensor and is equipped with our monocular HUD. 
Our method pointed at the depth position within a 3.4 [m] error, which was 2 times more accurate than the previous method does.", "fno": "06402595", "keywords": [ "Vibrations", "Acceleration", "Navigation", "Augmented Reality", "Position Measurement", "Image Resolution", "Vehicles", "Depth Control", "HUD", "Navigation", "Augmented Reality" ], "authors": [ { "affiliation": "Corporate Research & Development center, Toshiba Corporation, 1, Komukai-Toshiba-cho, Saiwai-ku, Kawasaki, 212-8582, Japan", "fullName": "Tsuyoshi Tasaki", "givenName": "Tsuyoshi", "surname": "Tasaki", "__typename": "ArticleAuthorType" }, { "affiliation": "Corporate Research & Development center, Toshiba Corporation, 1, Komukai-Toshiba-cho, Saiwai-ku, Kawasaki, 212-8582, Japan", "fullName": "Akihisa Moriya", "givenName": "Akihisa", "surname": "Moriya", "__typename": "ArticleAuthorType" }, { "affiliation": "Corporate Research & Development center, Toshiba Corporation, 1, Komukai-Toshiba-cho, Saiwai-ku, Kawasaki, 212-8582, Japan", "fullName": "Aira Hotta", "givenName": "Aira", "surname": "Hotta", "__typename": "ArticleAuthorType" }, { "affiliation": "Corporate Research & Development center, Toshiba Corporation, 1, Komukai-Toshiba-cho, Saiwai-ku, Kawasaki, 212-8582, Japan", "fullName": "Takashi Sasaki", "givenName": "Takashi", "surname": "Sasaki", "__typename": "ArticleAuthorType" }, { "affiliation": "Corporate Research & Development center, Toshiba Corporation, 1, Komukai-Toshiba-cho, Saiwai-ku, Kawasaki, 212-8582, Japan", "fullName": "Haruhiko Okumura", "givenName": "Haruhiko", "surname": "Okumura", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "323-324", "year": "2012", "issn": null, "isbn": "978-1-4673-4660-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": 
{ "fno": "06402594", "articleId": "12OmNykCcft", "__typename": "AdjacentArticleType" }, "next": { "fno": "06402596", "articleId": "12OmNylKB1f", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmtma/2011/4296/1/4296a573", "title": "Application Research of Vibration and Noise Test and Analysis System in High Speed Railway Passenger Car", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2011/4296a573/12OmNqyDjmf", "parentPublication": { "id": "proceedings/icmtma/2011/4296/1", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836523", "title": "Human Attention and fatigue for AR Head-Up Displays", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ds-rt/2011/1643/0/06051803", "title": "Performance Characterization on Mobile Phones for Collaborative Augmented Reality (CAR) Applications", "doi": null, "abstractUrl": "/proceedings-article/ds-rt/2011/06051803/12OmNwdtw7n", "parentPublication": { "id": "proceedings/ds-rt/2011/1643/0", "title": "2011 IEEE/ACM 15th International Symposium on Distributed Simulation and Real Time Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2016/4149/0/4149a095", "title": "A Study in Virtual Navigation Cues for Forklift Operators", "doi": null, "abstractUrl": "/proceedings-article/svr/2016/4149a095/12OmNwlqhJO", "parentPublication": { "id": "proceedings/svr/2016/4149/0", "title": "2016 XVIII Symposium on Virtual and Augmented Reality 
(SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223407", "title": "Optical see-through HUDs effect on depth judgments of real world objects", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223407/12OmNyRg4pk", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223465", "title": "Optical see-through head up displays' effect on depth judgments of real world objects", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223465/12OmNybfr2x", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466859", "title": "Augmented Reality Interface Design Approaches for Goal-directed and Stimulus-driven Driving Tasks", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466859/14M3E5b55mM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcmeim/2021/2172/0/217200a465", "title": "Vibration characteristics of High-speed EMU considering car body structural vibration mode", "doi": null, "abstractUrl": "/proceedings-article/wcmeim/2021/217200a465/1ANLFYLvRks", "parentPublication": { "id": "proceedings/wcmeim/2021/2172/0", "title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccmso/2022/3288/0/328800a364", "title": "Analysis of a Semi-Active Vibration Absorber based on Magnetorheological Materials for a half-car model", "doi": null, "abstractUrl": 
"/proceedings-article/iccmso/2022/328800a364/1Mq128DK5kk", "parentPublication": { "id": "proceedings/iccmso/2022/3288/0", "title": "2022 International Conference on Computational Modelling, Simulation and Optimization (ICCMSO)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2019/5606/0/560600a040", "title": "Detection of Car Abnormal Vibration using Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/ism/2019/560600a040/1gFJcXCPyDu", "parentPublication": { "id": "proceedings/ism/2019/5606/0", "title": "2019 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzcPA9q", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNAo45DT", "doi": "10.1109/ISMAR.2017.30", "title": "3D-FRC: Depiction of the future road course in the Head-Up-Display", "normalizedTitle": "3D-FRC: Depiction of the future road course in the Head-Up-Display", "abstract": "The introduction of Head-Up-Displays (HUDs) have opened up avenues for a whole range of novel AR applications. However, until these applications become available for the mass market a number of problems need to be tackled. For example, the field of view (FoV) of current HUDs is extremely limited, and real world tracking and 3D reconstruction are still not precise enough to show driving information embedded into wide areas of complex traffic environment. It is not possible to show true AR-visualizations in the display areas provided by the current FoVs. In this paper, we investigate how an AR-like visualization approach in current HUDs (with a limited FoV) can support drivers in foreseeing the future road course. This visualisation uses the already established concept of an electronic horizon. By complying with automotive standards, our application can be easily adapted for series production. With this visualisation we performed a user study, investigating the effect on drivers' gaze behaviour. For this reason the test subjects were equipped with an eye tracking system. The results showed a decrease in both, the number of gazes as well as total glance time on the head unit and the instrument cluster. We also investigated the test subjects' braking behaviour around sharp bends of the road which showed an overall improvement when the visualisation was enabled. Furthermore it showed an increase of the mean glance duration in the area of the HUD. 
Note that the eye tracking system is not capable of distinguishing between glances at the visualisation in the HUD and the users' glance at objects behind the visualisation - overlapping with the HUD. This would require tracking the test persons' depth of focus. The study showed that developers need to be concerned about not displaying excessively in the HUD, so as not to distract drivers. It furthermore showed that AR-like visualizations have the potential to decrease the time the driver is not looking at the road creating a safer driving experience.", "abstracts": [ { "abstractType": "Regular", "content": "The introduction of Head-Up-Displays (HUDs) have opened up avenues for a whole range of novel AR applications. However, until these applications become available for the mass market a number of problems need to be tackled. For example, the field of view (FoV) of current HUDs is extremely limited, and real world tracking and 3D reconstruction are still not precise enough to show driving information embedded into wide areas of complex traffic environment. It is not possible to show true AR-visualizations in the display areas provided by the current FoVs. In this paper, we investigate how an AR-like visualization approach in current HUDs (with a limited FoV) can support drivers in foreseeing the future road course. This visualisation uses the already established concept of an electronic horizon. By complying with automotive standards, our application can be easily adapted for series production. With this visualisation we performed a user study, investigating the effect on drivers' gaze behaviour. For this reason the test subjects were equipped with an eye tracking system. The results showed a decrease in both, the number of gazes as well as total glance time on the head unit and the instrument cluster. We also investigated the test subjects' braking behaviour around sharp bends of the road which showed an overall improvement when the visualisation was enabled. 
Furthermore it showed an increase of the mean glance duration in the area of the HUD. Note that the eye tracking system is not capable of distinguishing between glances at the visualisation in the HUD and the users' glance at objects behind the visualisation - overlapping with the HUD. This would require tracking the test persons' depth of focus. The study showed that developers need to be concerned about not displaying excessively in the HUD, so as not to distract drivers. It furthermore showed that AR-like visualizations have the potential to decrease the time the driver is not looking at the road creating a safer driving experience.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The introduction of Head-Up-Displays (HUDs) have opened up avenues for a whole range of novel AR applications. However, until these applications become available for the mass market a number of problems need to be tackled. For example, the field of view (FoV) of current HUDs is extremely limited, and real world tracking and 3D reconstruction are still not precise enough to show driving information embedded into wide areas of complex traffic environment. It is not possible to show true AR-visualizations in the display areas provided by the current FoVs. In this paper, we investigate how an AR-like visualization approach in current HUDs (with a limited FoV) can support drivers in foreseeing the future road course. This visualisation uses the already established concept of an electronic horizon. By complying with automotive standards, our application can be easily adapted for series production. With this visualisation we performed a user study, investigating the effect on drivers' gaze behaviour. For this reason the test subjects were equipped with an eye tracking system. The results showed a decrease in both, the number of gazes as well as total glance time on the head unit and the instrument cluster. 
We also investigated the test subjects' braking behaviour around sharp bends of the road which showed an overall improvement when the visualisation was enabled. Furthermore it showed an increase of the mean glance duration in the area of the HUD. Note that the eye tracking system is not capable of distinguishing between glances at the visualisation in the HUD and the users' glance at objects behind the visualisation - overlapping with the HUD. This would require tracking the test persons' depth of focus. The study showed that developers need to be concerned about not displaying excessively in the HUD, so as not to distract drivers. It furthermore showed that AR-like visualizations have the potential to decrease the time the driver is not looking at the road creating a safer driving experience.", "fno": "2943a136", "keywords": [ "Automotive Engineering", "Data Visualisation", "Head Up Displays", "Road Safety", "Road Vehicles", "Traffic Engineering Computing", "Future Road Course", "Head Up Display", "HUD", "Fo V", "World Tracking", "Wide Areas", "Complex Traffic Environment", "Visualization Approach", "Eye Tracking System", "Total Glance Time", "Mean Glance Duration", "Head Up Displays", "Visualization", "Roads", "Automobiles", "Instruments", "Navigation", "Gaze Tracking" ], "authors": [ { "affiliation": null, "fullName": "Christian A. 
Wiesner", "givenName": "Christian A.", "surname": "Wiesner", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mike Ruf", "givenName": "Mike", "surname": "Ruf", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Demet Sirim", "givenName": "Demet", "surname": "Sirim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gudrun Klinker", "givenName": "Gudrun", "surname": "Klinker", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "136-143", "year": "2017", "issn": null, "isbn": "978-1-5386-2943-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2943a123", "articleId": "12OmNBcAGNU", "__typename": "AdjacentArticleType" }, "next": { "fno": "2943a144", "articleId": "12OmNrFTr7T", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icseng/2017/0610/0/0610a151", "title": "Background Scene Dominant Color Based Visibility Enhancement of Head-Up Display", "doi": null, "abstractUrl": "/proceedings-article/icseng/2017/0610a151/12OmNA1Vnwi", "parentPublication": { "id": "proceedings/icseng/2017/0610/0", "title": "2017 25th International Conference on Systems Engineering (ICSEng)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402595", "title": "Depth perception control by hiding displayed images based on car vibration for monocular head-up display", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402595/12OmNAXglOH", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a168", "title": "[POSTER] Overlaying Navigation Signs on a Road Surface Using a Head-Up Display", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a168/12OmNBhZ4i1", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2009/3804/2/3804b321", "title": "Automatic Parallax Measurement Method for Head-Up Display System", "doi": null, "abstractUrl": "/proceedings-article/icicta/2009/3804b321/12OmNwdL7eW", "parentPublication": { "id": "proceedings/icicta/2009/3804/3", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836468", "title": "Visualisation of the Electronic Horizon in Head-Up-Displays", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836468/12OmNzDehaq", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466859", "title": "Augmented Reality Interface Design Approaches for Goal-directed and Stimulus-driven Driving Tasks", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466859/14M3E5b55mM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089433", "title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090511", "title": "Evaluating Automotive Augmented Reality Head-up Display Effects on Driver Performance and Distraction", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090511/1jIxviTG03C", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090629", "title": "Place in the World or Place on the Screen? Investigating the Effects of Augmented Reality Head -Up Display User Interfaces on Drivers’ Spatial Knowledge Acquisition and Glance Behavior", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090629/1jIxyqb5Ali", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a228", "title": "A Comparison of Common Video Game versus Real-World Heads-Up-Display Designs for the Purpose of Target Localization and Identification", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a228/1yeQYcj6Nfq", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYoKmw", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNxR5UMF", "doi": "10.1109/ISMAR.2013.6671798", "title": "Are HMDs the better HUDs?", "normalizedTitle": "Are HMDs the better HUDs?", "abstract": "Head-mounted displays (HMDs) have the potential to overcome some of the technological limitations of currently existing automotive head-up displays (HUDs), such as the limited field of view and the restrictive boundaries of the windshield. In an initial study we evaluated the use of HMDs in cars by means of a typical HUD visualization, using a HUD as baseline output technology. We found no significant differences in terms of driving performance, physical uneasiness or visual distraction. User statements revealed several advantages and drawbacks of the different output technologies apart from technological maturity and ergonomics. These results will hopefully inspire researchers as well as application developers and even might lead us to novel HMD visualization approaches.", "abstracts": [ { "abstractType": "Regular", "content": "Head-mounted displays (HMDs) have the potential to overcome some of the technological limitations of currently existing automotive head-up displays (HUDs), such as the limited field of view and the restrictive boundaries of the windshield. In an initial study we evaluated the use of HMDs in cars by means of a typical HUD visualization, using a HUD as baseline output technology. We found no significant differences in terms of driving performance, physical uneasiness or visual distraction. User statements revealed several advantages and drawbacks of the different output technologies apart from technological maturity and ergonomics. 
These results will hopefully inspire researchers as well as application developers and even might lead us to novel HMD visualization approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Head-mounted displays (HMDs) have the potential to overcome some of the technological limitations of currently existing automotive head-up displays (HUDs), such as the limited field of view and the restrictive boundaries of the windshield. In an initial study we evaluated the use of HMDs in cars by means of a typical HUD visualization, using a HUD as baseline output technology. We found no significant differences in terms of driving performance, physical uneasiness or visual distraction. User statements revealed several advantages and drawbacks of the different output technologies apart from technological maturity and ergonomics. These results will hopefully inspire researchers as well as application developers and even might lead us to novel HMD visualization approaches.", "fno": "06671798", "keywords": [ "Visualization", "Vehicles", "Mirrors", "Augmented Reality", "Data Visualization", "Educational Institutions", "Automotive Engineering", "Mixed Reality", "Head Mounted Display", "Head Up Display" ], "authors": [ { "affiliation": "HCI Group, Univ. of Munich (LMU), Munich, Germany", "fullName": "Felix Lauber", "givenName": "Felix", "surname": "Lauber", "__typename": "ArticleAuthorType" }, { "affiliation": "HCI Group, Univ. 
of Munich (LMU), Munich, Germany", "fullName": "Andreas Butz", "givenName": "Andreas", "surname": "Butz", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-10-01T00:00:00", "pubType": "proceedings", "pages": "267-268", "year": "2013", "issn": null, "isbn": "978-1-4799-2869-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06671797", "articleId": "12OmNCdBDGD", "__typename": "AdjacentArticleType" }, "next": { "fno": "06671799", "articleId": "12OmNz61dzi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2017/2943/0/2943a136", "title": "3D-FRC: Depiction of the future road course in the Head-Up-Display", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943a136/12OmNAo45DT", "parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780247", "title": "Accurate Image Overlay on Video See-Through HMDs Using Vision and Accelerometers", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780247/12OmNscOUc3", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836523", "title": "Human Attention and fatigue for AR Head-Up Displays", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223407", "title": "Optical see-through HUDs effect on depth judgments of real world objects", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223407/12OmNyRg4pk", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/05/mcg2015050034", "title": "Reducing Visual Discomfort with HMDs Using Dynamic Depth of Field", "doi": null, "abstractUrl": "/magazine/cg/2015/05/mcg2015050034/13rRUEgarvh", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466859", "title": "Augmented Reality Interface Design Approaches for Goal-directed and Stimulus-driven Driving Tasks", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466859/14M3E5b55mM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a876", "title": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a876/1CJdZ8RwdnG", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a378", "title": "Enhancing the Reading Experience on AR HMDs by Using Smartphones as Assistive Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a378/1MNgGafxH4Q", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE 
Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090511", "title": "Evaluating Automotive Augmented Reality Head-up Display Effects on Driver Performance and Distraction", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090511/1jIxviTG03C", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a080", "title": "Can Retinal Projection Displays Improve Spatial Perception in Augmented Reality?", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a080/1pysvYTZF6w", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpZ", "title": "2015 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNyeWdKP", "doi": "10.1109/VR.2015.7223449", "title": "Pocket-size augmented reality system for flight control", "normalizedTitle": "Pocket-size augmented reality system for flight control", "abstract": "Head-up displays (HUDs) have become common equipment in aircraft cockpits. One of the uses of HUDs is to provide a specific visual interface for pilots in the form of what is called a \"tunnel-in-the-sky\" (i.e. 3D geometry for the navigation path displayed on a flat screen). According to recent studies the \"tunnel-in-the-sky\" approach does not provide crucial advantages in comparison with more traditional methods of presenting navigation information to pilots. Our research considers a stereoscopic version of the 3D \"tunnel-in-the-sky\" realized as an augmented reality (AR) pocket-size system with see-through light-weight AR glasses. The system consists of low-cost items and does not suffer from the drawbacks tied with existing synthetic/enhanced vision systems for pilots. The results of the experiments with desktop simulators of different AR pilot's interfaces (2D, 3D and stereo 3D conditions) proved the effectiveness of the proposed stereo AR solution. A flight test of the prototype of the developed AR system was carried out on Cessna 172 aircraft and is showed in the accompanying video.", "abstracts": [ { "abstractType": "Regular", "content": "Head-up displays (HUDs) have become common equipment in aircraft cockpits. One of the uses of HUDs is to provide a specific visual interface for pilots in the form of what is called a \"tunnel-in-the-sky\" (i.e. 3D geometry for the navigation path displayed on a flat screen). 
According to recent studies the \"tunnel-in-the-sky\" approach does not provide crucial advantages in comparison with more traditional methods of presenting navigation information to pilots. Our research considers a stereoscopic version of the 3D \"tunnel-in-the-sky\" realized as an augmented reality (AR) pocket-size system with see-through light-weight AR glasses. The system consists of low-cost items and does not suffer from the drawbacks tied with existing synthetic/enhanced vision systems for pilots. The results of the experiments with desktop simulators of different AR pilot's interfaces (2D, 3D and stereo 3D conditions) proved the effectiveness of the proposed stereo AR solution. A flight test of the prototype of the developed AR system was carried out on Cessna 172 aircraft and is showed in the accompanying video.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Head-up displays (HUDs) have become common equipment in aircraft cockpits. One of the uses of HUDs is to provide a specific visual interface for pilots in the form of what is called a \"tunnel-in-the-sky\" (i.e. 3D geometry for the navigation path displayed on a flat screen). According to recent studies the \"tunnel-in-the-sky\" approach does not provide crucial advantages in comparison with more traditional methods of presenting navigation information to pilots. Our research considers a stereoscopic version of the 3D \"tunnel-in-the-sky\" realized as an augmented reality (AR) pocket-size system with see-through light-weight AR glasses. The system consists of low-cost items and does not suffer from the drawbacks tied with existing synthetic/enhanced vision systems for pilots. The results of the experiments with desktop simulators of different AR pilot's interfaces (2D, 3D and stereo 3D conditions) proved the effectiveness of the proposed stereo AR solution. 
A flight test of the prototype of the developed AR system was carried out on Cessna 172 aircraft and is showed in the accompanying video.", "fno": "07223449", "keywords": [ "Augmented Reality", "Aircraft", "Three Dimensional Displays", "Electronic Mail", "Aerospace Control", "Aircraft Navigation", "Flight Safety", "Augmented Reality", "Virtual Reality" ], "authors": [ { "affiliation": "Moscow State Technical University of Civil Aviation, Russia", "fullName": "Andrey L. Gorbunov", "givenName": "Andrey L.", "surname": "Gorbunov", "__typename": "ArticleAuthorType" }, { "affiliation": "Inglobe Technologies S.r.l., Italy", "fullName": "Alessandro Terenzi", "givenName": "Alessandro", "surname": "Terenzi", "__typename": "ArticleAuthorType" }, { "affiliation": "Inglobe Technologies S.r.l., Italy", "fullName": "Graziano Terenzi", "givenName": "Graziano", "surname": "Terenzi", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "369-369", "year": "2015", "issn": null, "isbn": "978-1-4799-1727-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07223448", "articleId": "12OmNz6iOvZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "07223450", "articleId": "12OmNrAv3Ap", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2010/9343/0/05643602", "title": "Camera pose navigation using Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643602/12OmNvA1hoG", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a049", 
"title": "The Ventriloquist Effect in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a049/12OmNvAiSE1", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811001", "title": "Explosion Diagrams in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811001/12OmNwE9OR0", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2002/1781/0/17810237", "title": "A Pragmatic Approach to Augmented Reality Authoring", "doi": null, "abstractUrl": "/proceedings-article/ismar/2002/17810237/12OmNxV4iuj", "parentPublication": { "id": "proceedings/ismar/2002/1781/0", "title": "Proceedings. 
International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549376", "title": "Touch experience in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549376/12OmNy2agRt", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466859", "title": "Augmented Reality Interface Design Approaches for Goal-directed and Stimulus-driven Driving Tasks", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466859/14M3E5b55mM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798340", "title": "Augmented Reality Map Navigation with Freehand Gestures", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798340/1cJ1fg0gjAY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090511", "title": "Evaluating Automotive Augmented Reality Head-up Display Effects on Driver Performance and Distraction", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090511/1jIxviTG03C", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a384", "title": "An Empirical Study of Size Discrimination in Augmented Reality", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2021/129800a384/1yeQWO0csfe", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2021/2744/0/09631438", "title": "Mobile Augmented Reality for Craniotomy Planning", "doi": null, "abstractUrl": "/proceedings-article/iscc/2021/09631438/1zmvEvuTSCI", "parentPublication": { "id": "proceedings/iscc/2021/2744/0", "title": "2021 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgXc", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNzd7bLF", "doi": "10.1109/ICCVW.2011.6130369", "title": "More accurate pinhole camera calibration with imperfect planar target", "normalizedTitle": "More accurate pinhole camera calibration with imperfect planar target", "abstract": "This paper presents a novel approach to camera calibration that improves final accuracy with respect to standard methods using precision planar targets, even if now inaccurate, unmeasured, roughly planar targets can be used. The work builds on a recent trend in camera calibration, namely concurrent optimization of scene structure together with the intrinsic camera parameters [4, 8, 1]. A novel formulation is presented that allows maximum likelihood estimation in the case of inaccurate targets, as it extends the camera extrinsic parameters into a tight parametrization of the whole scene structure. It furthermore observes the special characteristics of multi-view perspective projection of planar targets. Its natural extensions to stereo camera calibration and hand-eye calibration are also presented. Experiments demonstrate improvements in the parametrization of the camera model as well as in eventual stereo reconstruction.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel approach to camera calibration that improves final accuracy with respect to standard methods using precision planar targets, even if now inaccurate, unmeasured, roughly planar targets can be used. The work builds on a recent trend in camera calibration, namely concurrent optimization of scene structure together with the intrinsic camera parameters [4, 8, 1]. 
A novel formulation is presented that allows maximum likelihood estimation in the case of inaccurate targets, as it extends the camera extrinsic parameters into a tight parametrization of the whole scene structure. It furthermore observes the special characteristics of multi-view perspective projection of planar targets. Its natural extensions to stereo camera calibration and hand-eye calibration are also presented. Experiments demonstrate improvements in the parametrization of the camera model as well as in eventual stereo reconstruction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel approach to camera calibration that improves final accuracy with respect to standard methods using precision planar targets, even if now inaccurate, unmeasured, roughly planar targets can be used. The work builds on a recent trend in camera calibration, namely concurrent optimization of scene structure together with the intrinsic camera parameters [4, 8, 1]. A novel formulation is presented that allows maximum likelihood estimation in the case of inaccurate targets, as it extends the camera extrinsic parameters into a tight parametrization of the whole scene structure. It furthermore observes the special characteristics of multi-view perspective projection of planar targets. Its natural extensions to stereo camera calibration and hand-eye calibration are also presented. 
Experiments demonstrate improvements in the parametrization of the camera model as well as in eventual stereo reconstruction.", "fno": "06130369", "keywords": [ "Calibration", "Cameras", "Image Reconstruction", "Maximum Likelihood Estimation", "Stereo Image Processing", "Pinhole Camera Calibration", "Imperfect Planar Target", "Scene Structure Concurrent Optimization", "Maximum Likelihood Estimation", "Multiview Perspective Projection", "Stereo Camera Calibration", "Hand Eye Calibration", "Stereo Reconstruction", "Calibration", "Cameras", "Optimization", "Geometry", "Estimation", "Accuracy", "Optical Distortion" ], "authors": [ { "affiliation": "Institute of Robotics and Mechatronics, German Aerospace Center (DLR), Germany", "fullName": "Klaus H. Strobl", "givenName": "Klaus H.", "surname": "Strobl", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Robotics and Mechatronics, German Aerospace Center (DLR), Germany", "fullName": "Gerd Hirzinger", "givenName": "Gerd", "surname": "Hirzinger", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-11-01T00:00:00", "pubType": "proceedings", "pages": "1068-1075", "year": "2011", "issn": null, "isbn": "978-1-4673-0063-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06130368", "articleId": "12OmNyrqzu9", "__typename": "AdjacentArticleType" }, "next": { "fno": "06130370", "articleId": "12OmNzmLxIL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/1995/7042/0/70420034", "title": "Robot aerobics: four easy steps to a more flexible calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420034/12OmNC4eSxw", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International 
Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477641", "title": "Forget the checkerboard: Practical self-calibration using a planar scene", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477641/12OmNqFrGxu", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2004/2244/0/01410482", "title": "Planar vanishing points based camera calibration", "doi": null, "abstractUrl": "/proceedings-article/icig/2004/01410482/12OmNxxdZAH", "parentPublication": { "id": "proceedings/icig/2004/2244/0", "title": "Proceedings. Third International Conference on Image and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imvip/2011/0230/0/06167879", "title": "Capturing Optimal Image Networks for Planar Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/imvip/2011/06167879/12OmNyvGykw", "parentPublication": { "id": "proceedings/imvip/2011/0230/0", "title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1999/0149/1/01491022", "title": "Planar Catadioptric Stereo: Geometry and Calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1999/01491022/12OmNzUxO9t", 
"parentPublication": { "id": "proceedings/cvpr/1999/0149/2", "title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise/2009/3887/0/pid981890", "title": "A New Global Calibration Method Using Combined 3D Target", "doi": null, "abstractUrl": "/proceedings-article/icise/2009/pid981890/12OmNzaQobq", "parentPublication": { "id": "proceedings/icise/2009/3887/0", "title": "Information Science and Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699178", "title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500c325", "title": "Modeling dynamic target deformation in camera calibration", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500c325/1B13lZOanIc", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/4.803E44", "title": "Calibration of Axial Fisheye Cameras Through Generic Virtual Central Models", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/4.803E44/1hQqpDxzYbe", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgXc", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNAKLZZK", "doi": "10.1109/ICCVW.2011.6130297", "title": "Holistic 3D reconstruction of urban structures from low-rank textures", "normalizedTitle": "Holistic 3D reconstruction of urban structures from low-rank textures", "abstract": "We introduce a new approach to reconstructing accurate camera geometry and 3D models for urban structures in a holistic fashion, i.e., without relying on extraction or matching of traditional local features such as points and edges. Instead, we use semi-global or global features based on transform invariant low-rank textures, which are ubiquitous in urban scenes. Modern high-dimensional optimization techniques enable us to accurately and robustly recover precise and consistent camera calibration and scene geometry from single or multiple images of the scene. We demonstrate how to construct 3D models of large-scale buildings from sequences of multiple large-baseline uncalibrated images that conventional SFM systems do not apply.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce a new approach to reconstructing accurate camera geometry and 3D models for urban structures in a holistic fashion, i.e., without relying on extraction or matching of traditional local features such as points and edges. Instead, we use semi-global or global features based on transform invariant low-rank textures, which are ubiquitous in urban scenes. Modern high-dimensional optimization techniques enable us to accurately and robustly recover precise and consistent camera calibration and scene geometry from single or multiple images of the scene. 
We demonstrate how to construct 3D models of large-scale buildings from sequences of multiple large-baseline uncalibrated images that conventional SFM systems do not apply.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce a new approach to reconstructing accurate camera geometry and 3D models for urban structures in a holistic fashion, i.e., without relying on extraction or matching of traditional local features such as points and edges. Instead, we use semi-global or global features based on transform invariant low-rank textures, which are ubiquitous in urban scenes. Modern high-dimensional optimization techniques enable us to accurately and robustly recover precise and consistent camera calibration and scene geometry from single or multiple images of the scene. We demonstrate how to construct 3D models of large-scale buildings from sequences of multiple large-baseline uncalibrated images that conventional SFM systems do not apply.", "fno": "06130297", "keywords": [ "Calibration", "Cameras", "Feature Extraction", "Image Reconstruction", "Image Texture", "Natural Scenes", "Optimisation", "Solid Modelling", "Town And Country Planning", "Urban Structures", "Camera Geometry Reconstruction", "3 D Models", "Holistic 3 D Reconstruction", "Semiglobal Features", "Global Features", "Transform Invariant Low Rank Textures", "Urban Scenes", "High Dimensional Optimization Technique", "Consistent Camera Calibration", "Scene Geometry", "Large Scale Buildings", "Multiple Large Baseline Uncalibrated Images", "Three Dimensional Displays", "Buildings", "Cameras", "Feature Extraction", "Image Coding", "Image Edge Detection", "Encoding" ], "authors": [ { "affiliation": "CS Dept., University of Illinois at Urbana-Champaign, USA", "fullName": "Hossein Mobahi", "givenName": "Hossein", "surname": "Mobahi", "__typename": "ArticleAuthorType" }, { "affiliation": "ECE Dept., University of Illinois at Urbana-Champaign, USA", "fullName": "Zihan Zhou", "givenName": 
"Zihan", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "EECS Dept., University of California, Berkeley, USA", "fullName": "Allen Y. Yang", "givenName": "Allen Y.", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "ECE Dept., University of Illinois at Urbana-Champaign, USA", "fullName": "Yi Ma", "givenName": "Yi", "surname": "Ma", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-11-01T00:00:00", "pubType": "proceedings", "pages": "593-600", "year": "2011", "issn": null, "isbn": "978-1-4673-0063-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06130296", "articleId": "12OmNvAAtJG", "__typename": "AdjacentArticleType" }, "next": { "fno": "06130298", "articleId": "12OmNApLGDL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aipr/2005/2479/0/24790198", "title": "Content-Based 3D Mosaic Representation for Video of Dynamic 3D Scenes", "doi": null, "abstractUrl": "/proceedings-article/aipr/2005/24790198/12OmNBfZSmR", "parentPublication": { "id": "proceedings/aipr/2005/2479/0", "title": "34th Applied Imagery and Pattern Recognition Workshop (AIPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995462", "title": "Semantic structure from motion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995462/12OmNButq8E", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a784", "title": "FoveaNet: Perspective-Aware Urban Scene Parsing", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a784/12OmNs4S8wO", 
"parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imvip/2011/0230/0/06167872", "title": "Single Image Augmented Reality Using Planar Structures in Urban Environments", "doi": null, "abstractUrl": "/proceedings-article/imvip/2011/06167872/12OmNvDZEZQ", "parentPublication": { "id": "proceedings/imvip/2011/0230/0", "title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118d978", "title": "Geometric Urban Geo-localization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118d978/12OmNym2bWG", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a094", "title": "Prior-Based Facade Rectification for AR in Urban Environment", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a094/12OmNyr8Ye0", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457b629", "title": "Turning an Urban Scene Video into a Cinemagraph", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457b629/12OmNzYNN7K", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a362", "title": "An Image-Based 
Approach for 3D Reconstruction of Urban Scenes Using Architectural Symmetries", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a362/17D45WgziN9", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a616", "title": "Multi-planar Monocular Reconstruction of Manhattan Indoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a616/17D45XvMcbo", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797855", "title": "Streaming a Sequence of Textures for Adaptive 3D Scene Delivery", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797855/1cJ0SsosyEE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrJiCCK", "title": "2011 International Conference on 3D Imaging, Modeling, Processing, Visualization and Transmission", "acronym": "3dimpvt", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNrAMF4z", "doi": "10.1109/3DIMPVT.2011.48", "title": "Visual Words for 3D Reconstruction and Pose Computation", "normalizedTitle": "Visual Words for 3D Reconstruction and Pose Computation", "abstract": "Visual vocabularies are standard tools in the object/image classification literature, and are emerging as a new tool for building point correspondences for pose estimation. This paper proposes several visual word based methods for point matching, with structure from motion and pose estimation applications in view. The three dimensional geometry of a scene is first extracted with bundle adjustment techniques based on the key point correspondences. These correspondences are obtained by grouping the set of all SIFT descriptors from the training images into visual words. We obtain a more accurate 3D geometry than with classical image-to-image point matching. In the second step, these visual words serve as 3D point descriptors robust to viewpoint change, and are then used for building 2D-3D correspondences for a test image, yielding the pose of the camera by solving the PnP problem. We compare several visual word formation techniques w.r.t robustness to viewpoint change between the learning and test images and discuss the required computational time.", "abstracts": [ { "abstractType": "Regular", "content": "Visual vocabularies are standard tools in the object/image classification literature, and are emerging as a new tool for building point correspondences for pose estimation. This paper proposes several visual word based methods for point matching, with structure from motion and pose estimation applications in view. 
The three dimensional geometry of a scene is first extracted with bundle adjustment techniques based on the key point correspondences. These correspondences are obtained by grouping the set of all SIFT descriptors from the training images into visual words. We obtain a more accurate 3D geometry than with classical image-to-image point matching. In the second step, these visual words serve as 3D point descriptors robust to viewpoint change, and are then used for building 2D-3D correspondences for a test image, yielding the pose of the camera by solving the PnP problem. We compare several visual word formation techniques w.r.t robustness to viewpoint change between the learning and test images and discuss the required computational time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual vocabularies are standard tools in the object/image classification literature, and are emerging as a new tool for building point correspondences for pose estimation. This paper proposes several visual word based methods for point matching, with structure from motion and pose estimation applications in view. The three dimensional geometry of a scene is first extracted with bundle adjustment techniques based on the key point correspondences. These correspondences are obtained by grouping the set of all SIFT descriptors from the training images into visual words. We obtain a more accurate 3D geometry than with classical image-to-image point matching. In the second step, these visual words serve as 3D point descriptors robust to viewpoint change, and are then used for building 2D-3D correspondences for a test image, yielding the pose of the camera by solving the PnP problem. 
We compare several visual word formation techniques w.r.t robustness to viewpoint change between the learning and test images and discuss the required computational time.", "fno": "4369a326", "keywords": [ "Computational Geometry", "Image Classification", "Image Matching", "Image Reconstruction", "Motion Estimation", "Pose Estimation", "Visual Words", "3 D Reconstruction", "Visual Vocabularies", "Object Image Classification", "Point Matching", "Motion Estimation", "Pose Estimation", "SIFT Descriptors", "Training Images", "3 D Geometry", "Camera", "Pn P Problem", "Visualization", "Three Dimensional Displays", "Training", "Solid Modeling", "Cameras", "Buildings", "Feature Extraction", "Pose Computation", "Visual Word" ], "authors": [ { "affiliation": "INRIA/LORIA/Nancy-Universite, France", "fullName": "Srikrishna Bhat K.K.", "givenName": "Srikrishna", "surname": "Bhat K.K.", "__typename": "ArticleAuthorType" }, { "affiliation": "LORIA, Nancy-Univ., Vandoeuvre-les-Nancy, France", "fullName": "Marie-Odile Berger", "givenName": "Marie-Odile", "surname": "Berger", "__typename": "ArticleAuthorType" }, { "affiliation": "LORIA, Nancy-Univ., Vandoeuvre-les-Nancy, France", "fullName": "Frederic Sur", "givenName": "Frederic", "surname": "Sur", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dimpvt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-05-01T00:00:00", "pubType": "proceedings", "pages": "326-333", "year": "2011", "issn": "1550-6185", "isbn": "978-1-61284-429-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4369a318", "articleId": "12OmNBBQZux", "__typename": "AdjacentArticleType" }, "next": { "fno": "4369a334", "articleId": "12OmNviHKiv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032a001", "title": "Globally-Optimal Inlier 
Set Maximisation for Simultaneous Camera Pose and Feature Correspondence", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a001/12OmNs0C9AW", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c704", "title": "Camera Pose Voting for Large-Scale Image-Based Localization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c704/12OmNs5rkRv", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a416", "title": "Accelerated Relative Camera Pose from Oriented Features", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a416/12OmNx6g6qm", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118d978", "title": "Geometric Urban Geo-localization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118d978/12OmNym2bWG", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/02/08388302", "title": "Globally-Optimal Inlier Set Maximisation for Camera Pose and Correspondence Estimation", "doi": null, "abstractUrl": "/journal/tp/2020/02/08388302/13rRUx0gegI", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2018/6420/0/642000a136", "title": "Hybrid Camera Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a136/17D45WXIkI4", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/05/08611140", "title": "Minimal Case Relative Pose Computation Using Ray-Point-Ray Features", "doi": null, "abstractUrl": "/journal/tp/2020/05/08611140/17D45XERmmH", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1788", "title": "The Alignment of the Spheres: Globally-Optimal Spherical Mixture Alignment for Camera Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1788/1gyscEL6ng4", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c927", "title": "Single-Stage 6D Object Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c927/1m3nyLhWcyQ", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09411927", "title": "Better Prior Knowledge Improves Human-Pose-Based Extrinsic Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09411927/1tmiuaRjBLi", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference 
on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgXc", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNsd6vio", "doi": "10.1109/ICCVW.2011.6130316", "title": "An asymmetric real-time dense visual localisation and mapping system", "normalizedTitle": "An asymmetric real-time dense visual localisation and mapping system", "abstract": "This paper describes a dense tracking system (both monocular and multi-camera) which each perform in real-time (45Hz). The proposed approach combines a prior dense photometric model with online visual odometry which enables handling dynamic changes in the scene. In particular it will be shown how the technique takes into account large illumination variations and subsequently improves direct tracking techniques which are highly prone to illumination change. This is achieved by exploiting the relative advantages of both model-based and visual odometry techniques for tracking. In the case of direct model-based tracking, photometric models are usually acquired under significantly greater lighting differences than those observed by the current camera view, however, model-based approaches avoid drift. Incremental visual odometry, on the other hand, has relatively less lighting variation but integrates drift. To solve this problem a hybrid approach is proposed to simultaneously minimise drift via a 3D model whilst using locally consistent illumination to correct large photometric differences. Direct 6 dof tracking is performed by an accurate method, which directly minimizes dense image measurements iteratively, using non-linear optimisation. A stereo technique for automatically acquiring the 3D photometric model has also been optimised for the purpose of this paper. 
Real experiments are shown on complex 3D scenes for a hand-held camera undergoing fast 3D movement and various illumination changes including daylight, artificial-lights, significant shadows, non-Lambertian reflections, occlusions and saturations.", "abstracts": [ { "abstractType": "Regular", "content": "This paper describes a dense tracking system (both monocular and multi-camera) which each perform in real-time (45Hz). The proposed approach combines a prior dense photometric model with online visual odometry which enables handling dynamic changes in the scene. In particular it will be shown how the technique takes into account large illumination variations and subsequently improves direct tracking techniques which are highly prone to illumination change. This is achieved by exploiting the relative advantages of both model-based and visual odometry techniques for tracking. In the case of direct model-based tracking, photometric models are usually acquired under significantly greater lighting differences than those observed by the current camera view, however, model-based approaches avoid drift. Incremental visual odometry, on the other hand, has relatively less lighting variation but integrates drift. To solve this problem a hybrid approach is proposed to simultaneously minimise drift via a 3D model whilst using locally consistent illumination to correct large photometric differences. Direct 6 dof tracking is performed by an accurate method, which directly minimizes dense image measurements iteratively, using non-linear optimisation. A stereo technique for automatically acquiring the 3D photometric model has also been optimised for the purpose of this paper. 
Real experiments are shown on complex 3D scenes for a hand-held camera undergoing fast 3D movement and various illumination changes including daylight, artificial-lights, significant shadows, non-Lambertian reflections, occlusions and saturations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper describes a dense tracking system (both monocular and multi-camera) which each perform in real-time (45Hz). The proposed approach combines a prior dense photometric model with online visual odometry which enables handling dynamic changes in the scene. In particular it will be shown how the technique takes into account large illumination variations and subsequently improves direct tracking techniques which are highly prone to illumination change. This is achieved by exploiting the relative advantages of both model-based and visual odometry techniques for tracking. In the case of direct model-based tracking, photometric models are usually acquired under significantly greater lighting differences than those observed by the current camera view, however, model-based approaches avoid drift. Incremental visual odometry, on the other hand, has relatively less lighting variation but integrates drift. To solve this problem a hybrid approach is proposed to simultaneously minimise drift via a 3D model whilst using locally consistent illumination to correct large photometric differences. Direct 6 dof tracking is performed by an accurate method, which directly minimizes dense image measurements iteratively, using non-linear optimisation. A stereo technique for automatically acquiring the 3D photometric model has also been optimised for the purpose of this paper. 
Real experiments are shown on complex 3D scenes for a hand-held camera undergoing fast 3D movement and various illumination changes including daylight, artificial-lights, significant shadows, non-Lambertian reflections, occlusions and saturations.", "fno": "06130316", "keywords": [ "Distance Measurement", "Lighting", "Object Tracking", "Solid Modelling", "Asymmetric Real Time Dense Visual Localisation System", "Asymmetric Real Time Dense Visual Mapping System", "Dense Tracking System", "A Prior Dense Photometric Model", "Online Visual Odometry", "Illumination Variations", "Illumination Change", "Model Based Technique", "Visual Odometry Technique", "Direct Model Based Tracking", "Drift Avoidance", "Incremental Visual Odometry", "Drift Minimization", "3 D Photometric Model", "Nonlinear Optimisation", "Hand Held Camera", "Non Lambertian Reflections", "Occlusions", "Saturations", "Daylight", "Artificial Lights", "Significant Shadows", "Three Dimensional Displays", "Lighting", "Cameras", "Robustness", "Visualization", "Real Time Systems", "Solid Modeling" ], "authors": [ { "affiliation": "CNRS-I3S, UNSA, 2000 Route des Lucioles BP 121, Sophia Antipolis, France", "fullName": "Andrew I. 
Comport", "givenName": "Andrew I.", "surname": "Comport", "__typename": "ArticleAuthorType" }, { "affiliation": "INRIA Sophia Antipolis Méditerranée, 2004 Route des Lucioles BP 93, France", "fullName": "Maxime Meilland", "givenName": "Maxime", "surname": "Meilland", "__typename": "ArticleAuthorType" }, { "affiliation": "INRIA Sophia Antipolis Méditerranée, 2004 Route des Lucioles BP 93, France", "fullName": "Patrick Rives", "givenName": "Patrick", "surname": "Rives", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-11-01T00:00:00", "pubType": "proceedings", "pages": "700-703", "year": "2011", "issn": null, "isbn": "978-1-4673-0063-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06130315", "articleId": "12OmNBp52Bp", "__typename": "AdjacentArticleType" }, "next": { "fno": "06130317", "articleId": "12OmNynsbzn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032d923", "title": "Stereo DSO: Large-Scale Direct Sparse Visual Odometry with Stereo Cameras", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d923/12OmNApLGr1", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2014/4435/0/4435a132", "title": "Robust Visual Tracking for Retinal Mapping in Computer-Assisted Slit-Lamp Imaging", "doi": null, "abstractUrl": "/proceedings-article/cbms/2014/4435a132/12OmNBZpHbS", "parentPublication": { "id": "proceedings/cbms/2014/4435/0", "title": "2014 IEEE 27th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840b449", "title": "Semi-dense Visual Odometry for a Monocular Camera", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840b449/12OmNC1oT51", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a018", "title": "σ-DVO: Sensor Noise Model Meets Dense Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a018/12OmNCwUmxA", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/3/07295132", "title": "Face pose tracking under arbitrary illumination changes", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07295132/12OmNyuPL4X", "parentPublication": { "id": "proceedings/visapp/2014/8133/2", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/03/07898369", "title": "Direct Sparse Odometry", "doi": null, "abstractUrl": "/journal/tp/2018/03/07898369/13rRUB7a12l", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545263", "title": "Semantic-only Visual Odometry based on dense class-level segmentation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545263/17D45XDIXOV", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10086694", "title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping", "doi": null, "abstractUrl": "/journal/tp/5555/01/10086694/1LUpwXZtAe4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a443", "title": "Multi-Spectral Visual Odometry without Explicit Stereo Matching", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a443/1ezRDaaunNm", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b278", "title": "D3VO: Deep Depth, Deep Pose and Deep Uncertainty for Monocular Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b278/1m3neRj6c1O", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKisA", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WrVgeD", "doi": "10.1109/AIVR.2018.00022", "title": "A Method to Build Multi-Scene Datasets for CNN for Camera Pose Regression", "normalizedTitle": "A Method to Build Multi-Scene Datasets for CNN for Camera Pose Regression", "abstract": "Convolutional neural networks (CNN) have shown to be useful for camera pose regression, and They have robust effects against some challenging scenarios such as lighting changes, motion blur, and scenes with lots of textureless surfaces. Additionally, PoseNet shows that the deep learning system can interpolate the camera pose in space between training images. In this paper, we explore how different strategies for processing datasets will affect the pose regression and propose a method for building multi-scene datasets for training such neural networks. We demonstrate that the location of several scenes can be remembered using only one neural network. By combining multiple scenes, we found that the position errors of the neural network do not decrease significantly as the distance between the cameras increases, which means that we do not need to train several models for the increase number of scenes. We also explore the impact factors that influence the accuracy of models for multi-scene camera pose regression, which can help us merge several scenes into one dataset in a better way. We opened our code and datasets to the public for better researches.", "abstracts": [ { "abstractType": "Regular", "content": "Convolutional neural networks (CNN) have shown to be useful for camera pose regression, and They have robust effects against some challenging scenarios such as lighting changes, motion blur, and scenes with lots of textureless surfaces. 
Additionally, PoseNet shows that the deep learning system can interpolate the camera pose in space between training images. In this paper, we explore how different strategies for processing datasets will affect the pose regression and propose a method for building multi-scene datasets for training such neural networks. We demonstrate that the location of several scenes can be remembered using only one neural network. By combining multiple scenes, we found that the position errors of the neural network do not decrease significantly as the distance between the cameras increases, which means that we do not need to train several models for the increase number of scenes. We also explore the impact factors that influence the accuracy of models for multi-scene camera pose regression, which can help us merge several scenes into one dataset in a better way. We opened our code and datasets to the public for better researches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Convolutional neural networks (CNN) have shown to be useful for camera pose regression, and They have robust effects against some challenging scenarios such as lighting changes, motion blur, and scenes with lots of textureless surfaces. Additionally, PoseNet shows that the deep learning system can interpolate the camera pose in space between training images. In this paper, we explore how different strategies for processing datasets will affect the pose regression and propose a method for building multi-scene datasets for training such neural networks. We demonstrate that the location of several scenes can be remembered using only one neural network. By combining multiple scenes, we found that the position errors of the neural network do not decrease significantly as the distance between the cameras increases, which means that we do not need to train several models for the increase number of scenes. 
We also explore the impact factors that influence the accuracy of models for multi-scene camera pose regression, which can help us merge several scenes into one dataset in a better way. We opened our code and datasets to the public for better researches.", "fno": "926900a108", "keywords": [ "Cameras", "Feature Extraction", "Feedforward Neural Nets", "Image Motion Analysis", "Learning Artificial Intelligence", "Object Detection", "Pose Estimation", "Regression Analysis", "Neural Network", "Cameras Increases", "Multiscene Camera", "Build Multiscene Datasets", "CNN", "Camera Pose Regression", "Convolutional Neural Networks", "Robust Effects", "Lighting Changes", "Motion Blur", "Textureless Surfaces", "Deep Learning System", "Cameras", "Training", "Three Dimensional Displays", "Neural Networks", "Buildings", "Visualization", "Task Analysis", "Visual Localization", "Camera Pose Estimation", "Convolutional Neural Network", "Dataset" ], "authors": [ { "affiliation": null, "fullName": "Yuhao Ma", "givenName": "Yuhao", "surname": "Ma", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hao Guo", "givenName": "Hao", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hong Chen", "givenName": "Hong", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mengxiao Tian", "givenName": "Mengxiao", "surname": "Tian", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xin Huo", "givenName": "Xin", "surname": "Huo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chengjiang Long", "givenName": "Chengjiang", "surname": "Long", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shiye Tang", "givenName": "Shiye", "surname": "Tang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xiaoyu Song", "givenName": "Xiaoyu", "surname": "Song", "__typename": "ArticleAuthorType" }, { "affiliation": null, 
"fullName": "Qing Wang", "givenName": "Qing", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-12-01T00:00:00", "pubType": "proceedings", "pages": "108-115", "year": "2018", "issn": null, "isbn": "978-1-5386-9269-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "926900a100", "articleId": "17D45Xh13v2", "__typename": "AdjacentArticleType" }, "next": { "fno": "926900a116", "articleId": "17D45Xi9rWr", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2016/5407/0/5407a685", "title": "Learning Camera Viewpoint Using CNN to Improve 3D Body Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a685/12OmNqFa5pt", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a506", "title": "Monocular 3D Human Pose Estimation in the Wild Using Improved CNN Supervision", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a506/12OmNxdDFF9", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200c713", "title": "Learning Multi-Scene Absolute Pose Regression with Transformers", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200c713/1BmFHZcKd3y", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/3dv/2022/5670/0/567000a722", "title": "OoD-Pose: Camera Pose Regression From Out-of-Distribution Synthetic Views", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a722/1KYsoOpcxmU", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300d297", "title": "Understanding the Limitations of CNN-Based Absolute Camera Pose Regression", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300d297/1gyrnhVtmg0", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c282", "title": "Resolving 3D Human Pose Ambiguities With 3D Scene Constraints", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c282/1hVlg3qjlHq", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d778", "title": "Adversarial Networks for Camera Pose Regression and Refinement", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d778/1i5myUiQLu0", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900j954", "title": "Learning Neural Representation of Camera Pose with Matrix Representation of Pose Shift via View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900j954/1yeIwKmue4w", 
"parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1881", "title": "Multi-View Multi-Person 3D Pose Estimation with Plane Sweep Stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1881/1yeJb6FUA4E", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a145", "title": "Visual Camera Re-Localization Using Graph Neural Networks and Relative Pose Supervision", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a145/1zWElGX2NaM", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45XDIXTF", "doi": "10.1109/CVPR.2018.00752", "title": "InLoc: Indoor Visual Localization with Dense Matching and View Synthesis", "normalizedTitle": "InLoc: Indoor Visual Localization with Dense Matching and View Synthesis", "abstract": "We seek to predict the 6 degree-of-freedom (6DoF) pose of a query photograph with respect to a large indoor 3D map. The contributions of this work are three-fold. First, we develop a new large-scale visual localization method targeted for indoor environments. The method proceeds along three steps: (i) efficient retrieval of candidate poses that ensures scalability to large-scale environments, (ii) pose estimation using dense matching rather than local features to deal with texture less indoor scenes, and (iii) pose verification by virtual view synthesis to cope with significant changes in viewpoint, scene layout, and occluders. Second, we collect a new dataset with reference 6DoF poses for large-scale indoor localization. Query photographs are captured by mobile phones at a different time than the reference 3D map, thus presenting a realistic indoor localization scenario. Third, we demonstrate that our method significantly outperforms current state-of-the-art indoor localization approaches on this new challenging data.", "abstracts": [ { "abstractType": "Regular", "content": "We seek to predict the 6 degree-of-freedom (6DoF) pose of a query photograph with respect to a large indoor 3D map. The contributions of this work are three-fold. First, we develop a new large-scale visual localization method targeted for indoor environments. 
The method proceeds along three steps: (i) efficient retrieval of candidate poses that ensures scalability to large-scale environments, (ii) pose estimation using dense matching rather than local features to deal with texture less indoor scenes, and (iii) pose verification by virtual view synthesis to cope with significant changes in viewpoint, scene layout, and occluders. Second, we collect a new dataset with reference 6DoF poses for large-scale indoor localization. Query photographs are captured by mobile phones at a different time than the reference 3D map, thus presenting a realistic indoor localization scenario. Third, we demonstrate that our method significantly outperforms current state-of-the-art indoor localization approaches on this new challenging data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We seek to predict the 6 degree-of-freedom (6DoF) pose of a query photograph with respect to a large indoor 3D map. The contributions of this work are three-fold. First, we develop a new large-scale visual localization method targeted for indoor environments. The method proceeds along three steps: (i) efficient retrieval of candidate poses that ensures scalability to large-scale environments, (ii) pose estimation using dense matching rather than local features to deal with texture less indoor scenes, and (iii) pose verification by virtual view synthesis to cope with significant changes in viewpoint, scene layout, and occluders. Second, we collect a new dataset with reference 6DoF poses for large-scale indoor localization. Query photographs are captured by mobile phones at a different time than the reference 3D map, thus presenting a realistic indoor localization scenario. 
Third, we demonstrate that our method significantly outperforms current state-of-the-art indoor localization approaches on this new challenging data.", "fno": "642000h199", "keywords": [ "Image Matching", "Image Retrieval", "Image Texture", "Pose Estimation", "Indoor 3 D Map Environments", "6 Do F Pose Prediction", "6 Degree Of Freedom Pose Prediction", "Pose Estimation", "Large Scale Indoor Visual Localization Method", "Mobile Phone", "Virtual View Synthesis", "Query Photograph", "In Loc", "Reference 3 D Map", "Three Dimensional Displays", "Visualization", "Cameras", "Buildings", "Lighting", "Distributed Databases" ], "authors": [ { "affiliation": null, "fullName": "Hajime Taira", "givenName": "Hajime", "surname": "Taira", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Masatoshi Okutomi", "givenName": "Masatoshi", "surname": "Okutomi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Torsten Sattler", "givenName": "Torsten", "surname": "Sattler", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mircea Cimpoi", "givenName": "Mircea", "surname": "Cimpoi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Marc Pollefeys", "givenName": "Marc", "surname": "Pollefeys", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Josef Sivic", "givenName": "Josef", "surname": "Sivic", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tomas Pajdla", "givenName": "Tomas", "surname": "Pajdla", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Akihiko Torii", "givenName": "Akihiko", "surname": "Torii", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "7199-7209", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000h190", "articleId": "17D45W9KVJW", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000h210", "articleId": "17D45Wda7gg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/comgeo/2013/5012/0/06602042", "title": "Image Based Localization in Indoor Environments", "doi": null, "abstractUrl": "/proceedings-article/comgeo/2013/06602042/12OmNqBbHA1", "parentPublication": { "id": "proceedings/comgeo/2013/5012/0", "title": "2013 4th International Conference on Computing for Geospatial Research & Application (COM.Geo)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457g175", "title": "Are Large-Scale 3D Models Really Necessary for Accurate Visual Localization?", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g175/12OmNxWcHch", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/11/07164332", "title": "Instant Outdoor Localization and SLAM Initialization from 2.5D Maps", "doi": null, "abstractUrl": "/journal/tg/2015/11/07164332/13rRUxBa5c1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i601", "title": "Benchmarking 6DOF Outdoor Visual Localization in Changing Conditions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i601/17D45W2Wyzk", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699254", "title": "Learning 6DoF Object Poses from Synthetic Single Channel Images", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699254/19F1NG6YVO0", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/04/08894513", "title": "InLoc: Indoor Visual Localization with Dense Matching and View Synthesis", "doi": null, "abstractUrl": "/journal/tp/2021/04/08894513/1eNb5IXi5iM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300e372", "title": "Is This the Right Place? Geometric-Semantic Pose Verification for Indoor Visual Localization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300e372/1hVlDrmcyDC", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/04/09229078", "title": "Long-Term Visual Localization Revisited", "doi": null, "abstractUrl": "/journal/tp/2022/04/09229078/1o3ni47nYZi", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413258", "title": "RISEdb: a Novel Indoor Localization Dataset", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413258/1tmidUG5Yqc", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th 
International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a080", "title": "BDLoc: Global Localization from 2.5D Building Map", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a080/1yeCVFqROFO", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1NWAhFO8", "doi": "10.1109/ISMAR-Adjunct.2018.00122", "title": "Live Collaborative Large-Scale Dense 3D Reconstruction Using Consumer-Grade Hardware", "normalizedTitle": "Live Collaborative Large-Scale Dense 3D Reconstruction Using Consumer-Grade Hardware", "abstract": "We present a real-time system for collaboratively reconstructing dense volumetric models of large 3D scenes (see Figure 1). Reconstructing such models is important for many tasks – e.g. content creation for films and games [10], augmented reality [19], cultural heritage preservation [21] and building information modelling [14] – but capturing large scenes can take significant time, and the risk of transient changes to the scene (e.g. people moving around) goes up as the capture time increases, corrupting the model and forcing the user to restart the capture. There are thus good reasons to want instead to split the capture into several shorter sequences, which can be captured either over multiple sessions or in parallel (by multiple users) and then joined to make the whole scene.", "abstracts": [ { "abstractType": "Regular", "content": "We present a real-time system for collaboratively reconstructing dense volumetric models of large 3D scenes (see Figure 1). Reconstructing such models is important for many tasks – e.g. content creation for films and games [10], augmented reality [19], cultural heritage preservation [21] and building information modelling [14] – but capturing large scenes can take significant time, and the risk of transient changes to the scene (e.g. people moving around) goes up as the capture time increases, corrupting the model and forcing the user to restart the capture. 
There are thus good reasons to want instead to split the capture into several shorter sequences, which can be captured either over multiple sessions or in parallel (by multiple users) and then joined to make the whole scene.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a real-time system for collaboratively reconstructing dense volumetric models of large 3D scenes (see Figure 1). Reconstructing such models is important for many tasks – e.g. content creation for films and games [10], augmented reality [19], cultural heritage preservation [21] and building information modelling [14] – but capturing large scenes can take significant time, and the risk of transient changes to the scene (e.g. people moving around) goes up as the capture time increases, corrupting the model and forcing the user to restart the capture. There are thus good reasons to want instead to split the capture into several shorter sequences, which can be captured either over multiple sessions or in parallel (by multiple users) and then joined to make the whole scene.", "fno": "08699243", "keywords": [ "Three Dimensional Displays", "Buildings", "Collaboration", "Real Time Systems", "Cameras", "Servers", "Hardware" ], "authors": [ { "affiliation": null, "fullName": "Stuart Golodetz", "givenName": "Stuart", "surname": "Golodetz", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tommaso Cavallari", "givenName": "Tommaso", "surname": "Cavallari", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nicholas A. Lord", "givenName": "Nicholas A.", "surname": "Lord", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Engineering Science, University of Oxford", "fullName": "Victor A. Prisacariu", "givenName": "Victor A.", "surname": "Prisacariu", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Engineering Science, University of Oxford", "fullName": "David W. 
Murray", "givenName": "David W.", "surname": "Murray", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Engineering Science, University of Oxford", "fullName": "Philip H. S. Torr", "givenName": "Philip H. S.", "surname": "Torr", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "413-414", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699278", "articleId": "19F1NJTrBfi", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699237", "articleId": "19F1LM85ZPq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130297", "title": "Holistic 3D reconstruction of urban structures from low-rank textures", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130297/12OmNAKLZZK", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926682", "title": "Densification of Semi-Dense Reconstructions for Novel View Generation of Live Scenes", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926682/12OmNBPc8wD", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840a473", "title": "Elastic Fragments for Dense Scene Reconstruction", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2013/2840a473/12OmNrYlmPh", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c201", "title": "Exploiting Object Similarity in 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c201/12OmNvnOwyO", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a028", "title": "Real-Time Surface of Revolution Reconstruction on Dense SLAM", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a028/12OmNy2Jt2J", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a814", "title": "Dense Rigid Reconstruction from Unstructured Discontinuous Video", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a814/12OmNzFv4gm", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a086", "title": "Dense Depth Map Reconstruction from Sparse Measurements Using a Multilayer Conditional Random Field Model", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a086/12OmNzmtWBI", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2018/11/08492363", "title": "Collaborative Large-Scale Dense 3D Reconstruction with Online Inter-Agent Pose Optimisation", "doi": null, "abstractUrl": "/journal/tg/2018/11/08492363/14M3E1hwrFS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1323", "title": "Learning Motion Priors for 4D Human Body Capture in 3D Scenes", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1323/1BmLs4NuZAQ", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c172", "title": "3D Scene Reconstruction With Multi-Layer Depth and Epipolar Transformers", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c172/1hVlfLRJFS0", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeKAFaHFC0", "doi": "10.1109/CVPR46437.2021.00430", "title": "Human POSEitioning System (HPS): 3D Human Pose Estimation and Self-localization in Large Scenes from Body-Mounted Sensors", "normalizedTitle": "Human POSEitioning System (HPS): 3D Human Pose Estimation and Self-localization in Large Scenes from Body-Mounted Sensors", "abstract": "We introduce (HPS) Human POSEitioning System, a method to recover the full 3D pose of a human registered with a 3D scan of the surrounding environment using wearable sensors. Using IMUs attached at the body limbs and a head mounted camera looking outwards, HPS fuses camera based self-localization with IMU-based human body tracking. The former provides drift-free but noisy position and orientation estimates while the latter is accurate in the short-term but subject to drift over longer periods of time.We show that our optimization-based integration exploits the benefits of the two, resulting in pose accuracy free of drift. Furthermore, we integrate 3D scene constraints into our optimization, such as foot contact with the ground, resulting in physically plausible motion. HPS complements more common third-person-based 3D pose estimation methods. It allows capturing larger recording volumes and longer periods of motion, and could be used for VR/AR ap plications where humans interact with the scene without requiring direct line of sight with an external camera, or to train agents that navigate and interact with the environment based on first-person visual input, like real humans.With HPS, we recorded a dataset of humans interacting with large 3D scenes (300-1000 m<sup>2</sup>) consisting of 7 subjects and more than 3 hours of diverse motion. 
The dataset, code and video will be available on the project page: http://virtualhumans.mpi-inf.mpg.de/hps/.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce (HPS) Human POSEitioning System, a method to recover the full 3D pose of a human registered with a 3D scan of the surrounding environment using wearable sensors. Using IMUs attached at the body limbs and a head mounted camera looking outwards, HPS fuses camera based self-localization with IMU-based human body tracking. The former provides drift-free but noisy position and orientation estimates while the latter is accurate in the short-term but subject to drift over longer periods of time.We show that our optimization-based integration exploits the benefits of the two, resulting in pose accuracy free of drift. Furthermore, we integrate 3D scene constraints into our optimization, such as foot contact with the ground, resulting in physically plausible motion. HPS complements more common third-person-based 3D pose estimation methods. It allows capturing larger recording volumes and longer periods of motion, and could be used for VR/AR ap plications where humans interact with the scene without requiring direct line of sight with an external camera, or to train agents that navigate and interact with the environment based on first-person visual input, like real humans.With HPS, we recorded a dataset of humans interacting with large 3D scenes (300-1000 m<sup>2</sup>) consisting of 7 subjects and more than 3 hours of diverse motion. The dataset, code and video will be available on the project page: http://virtualhumans.mpi-inf.mpg.de/hps/.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce (HPS) Human POSEitioning System, a method to recover the full 3D pose of a human registered with a 3D scan of the surrounding environment using wearable sensors. 
Using IMUs attached at the body limbs and a head mounted camera looking outwards, HPS fuses camera based self-localization with IMU-based human body tracking. The former provides drift-free but noisy position and orientation estimates while the latter is accurate in the short-term but subject to drift over longer periods of time.We show that our optimization-based integration exploits the benefits of the two, resulting in pose accuracy free of drift. Furthermore, we integrate 3D scene constraints into our optimization, such as foot contact with the ground, resulting in physically plausible motion. HPS complements more common third-person-based 3D pose estimation methods. It allows capturing larger recording volumes and longer periods of motion, and could be used for VR/AR ap plications where humans interact with the scene without requiring direct line of sight with an external camera, or to train agents that navigate and interact with the environment based on first-person visual input, like real humans.With HPS, we recorded a dataset of humans interacting with large 3D scenes (300-1000 m2) consisting of 7 subjects and more than 3 hours of diverse motion. 
The dataset, code and video will be available on the project page: http://virtualhumans.mpi-inf.mpg.de/hps/.", "fno": "450900e316", "keywords": [ "Body Sensor Networks", "Cameras", "Image Motion Analysis", "Inertial Navigation", "Optimisation", "Pose Estimation", "Sensor Placement", "Wearable Sensors", "Body Mounted Sensors", "Wearable Sensors", "Body Limbs", "HPS Fuses Camera Based Self Localization", "IMU Based Human Body Tracking", "Optimization Based Integration", "HPS Complements", "Third Person Based 3 D Pose Estimation Methods", "3 D Human Pose Estimation", "Human Positioning System", "HPS", "Head Mounted Camera", "Physically Plausible Motion", "Visualization", "Three Dimensional Displays", "Navigation", "Pose Estimation", "Cameras", "Sensor Systems", "Pattern Recognition" ], "authors": [ { "affiliation": "University of Tübingen,Germany", "fullName": "Vladimir Guzov", "givenName": "Vladimir", "surname": "Guzov", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tübingen,Germany", "fullName": "Aymen Mir", "givenName": "Aymen", "surname": "Mir", "__typename": "ArticleAuthorType" }, { "affiliation": "Czech Technical University in Prague,CIIRC,Czech Republic", "fullName": "Torsten Sattler", "givenName": "Torsten", "surname": "Sattler", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tübingen,Germany", "fullName": "Gerard Pons-Moll", "givenName": "Gerard", "surname": "Pons-Moll", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "4316-4327", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1yeKABysyR2", "name": "pcvpr202145090-09578298s1-mm_450900e316.zip", "size": "2.24 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578298s1-mm_450900e316.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "450900e306", "articleId": "1yeLCBiTUdO", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900e328", "articleId": "1yeIhZvBBgA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2016/5407/0/5407a685", "title": "Learning Camera Viewpoint Using CNN to Improve 3D Body Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a685/12OmNqFa5pt", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i437", "title": "Learning Monocular 3D Human Pose Estimation from Multi-view Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i437/17D45Vw15so", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200l1015", "title": "SPEC: Seeing People in the Wild with an Estimated Camera", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200l1015/1BmG6uLKFgY", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3264", "title": "Capturing and Inferring Dense Full-Body Human-Scene Contact", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3264/1H1mTqlw5pe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF 
Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a001", "title": "MoCapDeform: Monocular 3D Human Motion Capture in Deformable Scenes", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a001/1KYso7Sd0Zy", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600c870", "title": "ElliPose: Stereoscopic 3D Human Pose Estimation by Fitting Ellipsoids", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600c870/1L8qlaa9lOU", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a405", "title": "Multi-Person 3D Human Pose Estimation from Monocular Images", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a405/1ezRBMjoJxu", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a616", "title": "Mobile. 
Egocentric Human Body Motion Reconstruction Using Only Eyeglasses-mounted Cameras and a Few Body-worn Inertial Sensors", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a616/1tuAWRhj1Uk", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700a081", "title": "Temporally Consistent 3D Human Pose Estimation Using Dual 360&#x00B0; Cameras", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700a081/1uqGhVR6c2k", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900j985", "title": "On Self-Contact and Human Pose", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900j985/1yeI73M1Lc4", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "14jQfMYohco", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "14jQfP3PmvM", "doi": "10.1109/ICME.2018.8486588", "title": "Depth Aware Portrait Segmentation Using Dual Focus Images", "normalizedTitle": "Depth Aware Portrait Segmentation Using Dual Focus Images", "abstract": "The rapid development of camera in hand-held devices and the emergence of social media has led to an uprise in capturing self-portrait images. Augmenting these images for beautification or applying special effects to mimic DSLR camera has become a popular practice. Most of these effects require separation of foreground from background where the effect can be applied solely on background. To employ such effects on portrait (upper half of human body) images, a pixel-accurate segmentation is imperative. In this paper, we propose an effective method of fast depth aware CNN based portrait segmentation from monocular images. The proposed method is capable of being deployed on mobile phones, within the constraints of time and memory. On the segmented images, we demonstrate the application of bokeh effect, which blurs out-of-focus regions. We experiment with different combinations of state of the art encoder and decoder networks for segmentation and infer that our proposed method can improve the inference speed by 76 ms on mobile device while maintaining an accuracy of 97.0%.", "abstracts": [ { "abstractType": "Regular", "content": "The rapid development of camera in hand-held devices and the emergence of social media has led to an uprise in capturing self-portrait images. Augmenting these images for beautification or applying special effects to mimic DSLR camera has become a popular practice. 
Most of these effects require separation of foreground from background where the effect can be applied solely on background. To employ such effects on portrait (upper half of human body) images, a pixel-accurate segmentation is imperative. In this paper, we propose an effective method of fast depth aware CNN based portrait segmentation from monocular images. The proposed method is capable of being deployed on mobile phones, within the constraints of time and memory. On the segmented images, we demonstrate the application of bokeh effect, which blurs out-of-focus regions. We experiment with different combinations of state of the art encoder and decoder networks for segmentation and infer that our proposed method can improve the inference speed by 76 ms on mobile device while maintaining an accuracy of 97.0%.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The rapid development of camera in hand-held devices and the emergence of social media has led to an uprise in capturing self-portrait images. Augmenting these images for beautification or applying special effects to mimic DSLR camera has become a popular practice. Most of these effects require separation of foreground from background where the effect can be applied solely on background. To employ such effects on portrait (upper half of human body) images, a pixel-accurate segmentation is imperative. In this paper, we propose an effective method of fast depth aware CNN based portrait segmentation from monocular images. The proposed method is capable of being deployed on mobile phones, within the constraints of time and memory. On the segmented images, we demonstrate the application of bokeh effect, which blurs out-of-focus regions. 
We experiment with different combinations of state of the art encoder and decoder networks for segmentation and infer that our proposed method can improve the inference speed by 76 ms on mobile device while maintaining an accuracy of 97.0%.", "fno": "08486588", "keywords": [ "Cameras", "Image Colour Analysis", "Image Segmentation", "Fast Depth Aware CNN", "Monocular Images", "Segmented Images", "Bokeh Effect", "Out Of Focus Regions", "Mobile Device", "Depth Aware Portrait Segmentation", "Dual Focus Images", "Hand Held Devices", "Social Media", "Self Portrait Images", "DSLR Camera", "Pixel Accurate Segmentation", "Beautification Effects", "Image Segmentation", "Cameras", "Lenses", "Apertures", "Mobile Handsets", "Convolution", "Kernel", "Human Segmentation", "Dual Focus", "Deep Learning", "Depth Map", "Bokeh Effect" ], "authors": [ { "affiliation": "Samsung R&D Institute, Bangalore", "fullName": "Nitin Singh", "givenName": "Nitin", "surname": "Singh", "__typename": "ArticleAuthorType" }, { "affiliation": "Samsung R&D Institute, Bangalore", "fullName": "Manoj Kumar", "givenName": "Manoj", "surname": "Kumar", "__typename": "ArticleAuthorType" }, { "affiliation": "Samsung R&D Institute, Bangalore", "fullName": "P.J. 
Mahesh", "givenName": "P.J.", "surname": "Mahesh", "__typename": "ArticleAuthorType" }, { "affiliation": "Samsung R&D Institute, Bangalore", "fullName": "Rituparna Sarkar", "givenName": "Rituparna", "surname": "Sarkar", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2018", "issn": null, "isbn": "978-1-5386-1737-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08486540", "articleId": "14jQfO2OpDv", "__typename": "AdjacentArticleType" }, "next": { "fno": "08486520", "articleId": "14jQfQ4h77X", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2016/2491/0/2491a273", "title": "What is a Good Model for Depth from Defocus?", "doi": null, "abstractUrl": "/proceedings-article/crv/2016/2491a273/12OmNvCzFe8", "parentPublication": { "id": "proceedings/crv/2016/2491/0", "title": "2016 13th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2009/4534/0/05559009", "title": "Image destabilization: Programmable defocus using lens and sensor motion", "doi": null, "abstractUrl": "/proceedings-article/iccp/2009/05559009/12OmNvxsSTw", "parentPublication": { "id": "proceedings/iccp/2009/4534/0", "title": "IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1991/2148/0/00139760", "title": "A matrix based method for determining depth from focus", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1991/00139760/12OmNx3HI26", "parentPublication": { "id": "proceedings/cvpr/1991/2148/0", "title": "Proceedings. 
1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2022/5824/0/582400a202", "title": "PP-HumanSeg: Connectivity-Aware Portrait Segmentation with a Large-Scale Teleconferencing Video Dataset", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2022/582400a202/1B12shOgtt6", "parentPublication": { "id": "proceedings/wacvw/2022/5824/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956311", "title": "Learning Multi-Granularity Features for Re-Identifying Figures in Portrait Thangka Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956311/1IHqonUTYK4", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a729", "title": "Real-time Shadow-aware Portrait Relighting in Virtual Backgrounds for Realistic Telepresence", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a729/1JrRlRCFiz6", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756516", "title": "Boundary-sensitive Network for Portrait Segmentation", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756516/1bzYocxpXX2", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b630", "title": "Portrait Instance Segmentation for Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b630/1cdOF9FMtqg", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d417", "title": "Depth-Guided Dense Dynamic Filtering Network for Bokeh Effect Rendering", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d417/1i5mB3Uf3gI", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093588", "title": "SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093588/1jPbl6978as", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0O5YpzLW", "doi": "10.1109/VR.2019.8797780", "title": "[DC] Auto-focus Augmented Reality Eyeglasses for both Real World and Virtual Imagery", "normalizedTitle": "[DC] Auto-focus Augmented Reality Eyeglasses for both Real World and Virtual Imagery", "abstract": "Near-eye displays presenting accommodation cues, thereby mitigating the vergence-accommodation conflict, have garnered interest in the recent past. However, considering that at least 40% of US population is presbyopic and similarly a sizable world population suffering other refractive errors in eye, it requires that the users wear their prescription glasses along with the AR goggles, despite focus support for virtual imagery, making the overall experience uncomfortable. In the recent work published at ISMAR-TVCG 2018, which won a Best Paper Award, my collaborators and I presented an AR display which can automatically adjust for focus for both real and virtual imagery, avoiding an extra pair of prescription correcting glasses along with AR glasses. My recent work has been on a near-eye display design which integrates with the bifocals of a presbyopic user, thereby providing depth dependent stimuli to the user who is already well adapted to bifocal lenses. A variant of these ideas are going to be presented at IEEE VR 2018 through our accepted TVCG paper. 
I propose that the above mentioned works combined with my future work on integrating eye trackers and depth sensors to make the display glasses more robust and completely automatic, followed by evaluating the perceptual qualities of the display are the topics of my dissertation.", "abstracts": [ { "abstractType": "Regular", "content": "Near-eye displays presenting accommodation cues, thereby mitigating the vergence-accommodation conflict, have garnered interest in the recent past. However, considering that at least 40% of US population is presbyopic and similarly a sizable world population suffering other refractive errors in eye, it requires that the users wear their prescription glasses along with the AR goggles, despite focus support for virtual imagery, making the overall experience uncomfortable. In the recent work published at ISMAR-TVCG 2018, which won a Best Paper Award, my collaborators and I presented an AR display which can automatically adjust for focus for both real and virtual imagery, avoiding an extra pair of prescription correcting glasses along with AR glasses. My recent work has been on a near-eye display design which integrates with the bifocals of a presbyopic user, thereby providing depth dependent stimuli to the user who is already well adapted to bifocal lenses. A variant of these ideas are going to be presented at IEEE VR 2018 through our accepted TVCG paper. I propose that the above mentioned works combined with my future work on integrating eye trackers and depth sensors to make the display glasses more robust and completely automatic, followed by evaluating the perceptual qualities of the display are the topics of my dissertation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Near-eye displays presenting accommodation cues, thereby mitigating the vergence-accommodation conflict, have garnered interest in the recent past. 
However, considering that at least 40% of US population is presbyopic and similarly a sizable world population suffering other refractive errors in eye, it requires that the users wear their prescription glasses along with the AR goggles, despite focus support for virtual imagery, making the overall experience uncomfortable. In the recent work published at ISMAR-TVCG 2018, which won a Best Paper Award, my collaborators and I presented an AR display which can automatically adjust for focus for both real and virtual imagery, avoiding an extra pair of prescription correcting glasses along with AR glasses. My recent work has been on a near-eye display design which integrates with the bifocals of a presbyopic user, thereby providing depth dependent stimuli to the user who is already well adapted to bifocal lenses. A variant of these ideas are going to be presented at IEEE VR 2018 through our accepted TVCG paper. I propose that the above mentioned works combined with my future work on integrating eye trackers and depth sensors to make the display glasses more robust and completely automatic, followed by evaluating the perceptual qualities of the display are the topics of my dissertation.", "fno": "08797780", "keywords": [ "Augmented Reality", "Eye", "Gaussian Processes", "Helmet Mounted Displays", "Image Enhancement", "Image Resolution", "Ophthalmic Lenses", "Three Dimensional Displays", "Vision Defects", "Virtual Imagery", "Near Eye Displays", "Vergence Accommodation Conflict", "US Population", "Sizable World Population", "Refractive Errors", "Experience Uncomfortable", "ISMAR TVCG", "Best Paper Award", "AR Display", "Prescription Correcting Glasses", "AR Glasses", "Near Eye Display Design", "Presbyopic User", "IEEE VR 2018", "Accepted TVCG Paper", "Eye Trackers", "Display Glasses", "Auto Focus Augmented Reality Eyeglasses", "Lenses", "Glass", "Liquids", "Augmented Reality", "Sensors", "Liquid Crystal Displays" ], "authors": [ { "affiliation": "UNC, Chapel Hill", 
"fullName": "Praneeth Chakravarthula", "givenName": "Praneeth", "surname": "Chakravarthula", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1379-1380", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798171", "articleId": "1cJ0UKlSMP6", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798207", "articleId": "1cJ10bYBC2Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvmp/2010/4268/0/4268a123", "title": "Helium3D: A Laser-Based 3D Display with '3D+' Capability", "doi": null, "abstractUrl": "/proceedings-article/cvmp/2010/4268a123/12OmNAoDhXO", "parentPublication": { "id": "proceedings/cvmp/2010/4268/0", "title": "2010 Conference on Visual Media Production", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836523", "title": "Human Attention and fatigue for AR Head-Up Displays", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2015/7079/0/07169846", "title": "Non-calibrated and real-time human view estimation using a mobile corneal imaging camera", "doi": null, "abstractUrl": "/proceedings-article/icmew/2015/07169846/12OmNxu6pai", "parentPublication": { "id": "proceedings/icmew/2015/7079/0", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cmpeur/1989/1940/0/00093381", "title": "Engineering visual characteristics to match vision", "doi": null, "abstractUrl": "/proceedings-article/cmpeur/1989/00093381/12OmNzRqdDI", "parentPublication": { "id": "proceedings/cmpeur/1989/1940/0", "title": "COMPEURO 89 Proceedings VLSI and Computer Peripherals.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007218", "title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08458263", "title": "FocusAR: Auto-focus Augmented Reality Eyeglasses for both Real World and Virtual Imagery", "doi": null, "abstractUrl": "/journal/tg/2018/11/08458263/14M3DXKXjwc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2022/9548/0/954800a246", "title": "Comparison of Virtual-Real Integration Efficiency between Light Field and Conventional Near-Eye AR Displays", "doi": null, "abstractUrl": "/proceedings-article/mipr/2022/954800a246/1GvditqC14Q", "parentPublication": { "id": "proceedings/mipr/2022/9548/0", 
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a387", "title": "Light Field Display: An Adaptive Weighted Dual-Layer LCD Display for Multiple Views", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a387/1ap5x2N7jP2", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2020/01/08889397", "title": "Glasses-Free 3-D and Augmented Reality Display Advances: From Theory to Implementation", "doi": null, "abstractUrl": "/magazine/mu/2020/01/08889397/1ezPlyZdxeM", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3nIj7S0cU", "doi": "10.1109/CVPR42600.2020.00262", "title": "Blur Aware Calibration of Multi-Focus Plenoptic Camera", "normalizedTitle": "Blur Aware Calibration of Multi-Focus Plenoptic Camera", "abstract": "This paper presents a novel calibration algorithm for Multi-Focus Plenoptic Cameras (MFPCs) using raw images only. The design of such cameras is usually complex and relies on precise placement of optic elements. Several calibration procedures have been proposed to retrieve the camera parameters but relying on simplified models, reconstructed images to extract features, or multiple calibrations when several types of micro-lens are used. Considering blur information, we propose a new Blur Aware Plenoptic (BAP) feature. It is first exploited in a pre-calibration step that retrieves initial camera parameters, and secondly to express a new cost function for our single optimization process. The effectiveness of our calibration method is validated by quantitative and qualitative experiments.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel calibration algorithm for Multi-Focus Plenoptic Cameras (MFPCs) using raw images only. The design of such cameras is usually complex and relies on precise placement of optic elements. Several calibration procedures have been proposed to retrieve the camera parameters but relying on simplified models, reconstructed images to extract features, or multiple calibrations when several types of micro-lens are used. Considering blur information, we propose a new Blur Aware Plenoptic (BAP) feature. 
It is first exploited in a pre-calibration step that retrieves initial camera parameters, and secondly to express a new cost function for our single optimization process. The effectiveness of our calibration method is validated by quantitative and qualitative experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel calibration algorithm for Multi-Focus Plenoptic Cameras (MFPCs) using raw images only. The design of such cameras is usually complex and relies on precise placement of optic elements. Several calibration procedures have been proposed to retrieve the camera parameters but relying on simplified models, reconstructed images to extract features, or multiple calibrations when several types of micro-lens are used. Considering blur information, we propose a new Blur Aware Plenoptic (BAP) feature. It is first exploited in a pre-calibration step that retrieves initial camera parameters, and secondly to express a new cost function for our single optimization process. 
The effectiveness of our calibration method is validated by quantitative and qualitative experiments.", "fno": "716800c542", "keywords": [ "Calibration", "Cameras", "Feature Extraction", "Image Reconstruction", "Image Restoration", "Microlenses", "Optimisation", "Multifocus Plenoptic Cameras", "Optic Elements", "Image Reconstruction", "Blur Aware Plenoptic Feature", "Initial Camera Parameters", "Pre Calibration Step Method", "Blur Aware Calibration Algorithm", "MFPC", "Feature Extraction", "Microlens", "BAP Feature", "Optimization Process", "Cameras", "Lenses", "Calibration", "Feature Extraction", "Optimization", "Apertures" ], "authors": [ { "affiliation": "Université Clermont Auvergne, CNRS, SIGMA Clermont, Institut Pascal, France", "fullName": "Mathieu Labussière", "givenName": "Mathieu", "surname": "Labussière", "__typename": "ArticleAuthorType" }, { "affiliation": "Université Clermont Auvergne, CNRS, SIGMA Clermont, Institut Pascal, France", "fullName": "Céline Teulière", "givenName": "Céline", "surname": "Teulière", "__typename": "ArticleAuthorType" }, { "affiliation": "Cerema, Équipe-projet STI, France", "fullName": "Frédéric Bernardin", "givenName": "Frédéric", "surname": "Bernardin", "__typename": "ArticleAuthorType" }, { "affiliation": "Université Clermont Auvergne, CNRS, SIGMA Clermont, Institut Pascal, France", "fullName": "Omar Ait-Aider", "givenName": "Omar", "surname": "Ait-Aider", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "2542-2551", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800c532", "articleId": "1m3nh1kjYEo", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800c552", "articleId": "1m3o6tGQqMo", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccp/2010/7023/0/05585092", "title": "Rich image capture with plenoptic cameras", "doi": null, "abstractUrl": "/proceedings-article/iccp/2010/05585092/12OmNAYGls3", "parentPublication": { "id": "proceedings/iccp/2010/7023/0", "title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a202", "title": "Non-frontal Camera Calibration Using Focal Stack Imagery", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a202/12OmNC943xl", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2016/2491/0/2491a281", "title": "Blur Calibration for Depth from Defocus", "doi": null, "abstractUrl": "/proceedings-article/crv/2016/2491a281/12OmNs59JP3", "parentPublication": { "id": "proceedings/crv/2016/2491/0", "title": "2016 13th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733b763", "title": "Optimizing the Lens Selection Process for Multi-focus Plenoptic Cameras and Numerical Evaluation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b763/12OmNwtn3En", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwecms/2011/398/0/05952376", "title": "A new method for Scheimpflug camera calibration", "doi": null, "abstractUrl": "/proceedings-article/iwecms/2011/05952376/12OmNx57HFV", 
"parentPublication": { "id": "proceedings/iwecms/2011/398/0", "title": "2011 10th International Workshop on Electronics, Control, Measurement and Signals (ECMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a957", "title": "Corner-Based Geometric Calibration of Multi-focus Plenoptic Cameras", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a957/12OmNy5R3sS", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c345", "title": "On the Equivalence of Moving Entrance Pupil and Radial Distortion for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c345/12OmNyshmIc", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a286", "title": "Calibrating Light-Field Cameras Using Plenoptic Disc Features", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a286/17D45VtKisQ", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a115", "title": "Blind Calibration for Focused Plenoptic Cameras", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a115/1cdOJf1lggo", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a434", "title": 
"Creating Realistic Ground Truth Data for the Evaluation of Calibration Methods for Plenoptic and Conventional Cameras", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a434/1ezREgrRXHy", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwHyZZX", "title": "2013 IEEE Frontiers in Education Conference (FIE)", "acronym": "fie", "groupId": "1000297", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNqIQSlb", "doi": "10.1109/FIE.2013.6684848", "title": "Using different methodologies and technologies to training spatial skill in Engineering Graphic subjects", "normalizedTitle": "Using different methodologies and technologies to training spatial skill in Engineering Graphic subjects", "abstract": "Most papers about spatial skills and their components refer to the fact that engineering, architectural and scientific jobs require a good level of spatial ability. Spatial ability has an impact on every scientific and technical field, so it's still undergoing strong development when it comes to engineering, technology, art and many other aspects of life. In the academic environment, Graphic Design teachers usually see students who have difficulties solving tasks requiring spatial reasoning and viewing abilities. The main aim of this work is the development of didactic material based on several virtual and augmented reality formats, knowing how students behave while using them, and checking if they are useful materials to improve their spatial abilities. This work present Three different technologies: virtual reality, augmented reality and portable document format to find out if they are suitable technologies together suitable methodologies to improve spatial ability and from the student's perspective, their opinion of the tool and their motivation to learn more about the aspects of 3D reality. 
We present a pilot study that compared the results of improvement in spatial ability acquired by freshman engineering students also a survey of satisfaction and motivation of the methodology and technology used.", "abstracts": [ { "abstractType": "Regular", "content": "Most papers about spatial skills and their components refer to the fact that engineering, architectural and scientific jobs require a good level of spatial ability. Spatial ability has an impact on every scientific and technical field, so it's still undergoing strong development when it comes to engineering, technology, art and many other aspects of life. In the academic environment, Graphic Design teachers usually see students who have difficulties solving tasks requiring spatial reasoning and viewing abilities. The main aim of this work is the development of didactic material based on several virtual and augmented reality formats, knowing how students behave while using them, and checking if they are useful materials to improve their spatial abilities. This work present Three different technologies: virtual reality, augmented reality and portable document format to find out if they are suitable technologies together suitable methodologies to improve spatial ability and from the student's perspective, their opinion of the tool and their motivation to learn more about the aspects of 3D reality. We present a pilot study that compared the results of improvement in spatial ability acquired by freshman engineering students also a survey of satisfaction and motivation of the methodology and technology used.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Most papers about spatial skills and their components refer to the fact that engineering, architectural and scientific jobs require a good level of spatial ability. 
Spatial ability has an impact on every scientific and technical field, so it's still undergoing strong development when it comes to engineering, technology, art and many other aspects of life. In the academic environment, Graphic Design teachers usually see students who have difficulties solving tasks requiring spatial reasoning and viewing abilities. The main aim of this work is the development of didactic material based on several virtual and augmented reality formats, knowing how students behave while using them, and checking if they are useful materials to improve their spatial abilities. This work present Three different technologies: virtual reality, augmented reality and portable document format to find out if they are suitable technologies together suitable methodologies to improve spatial ability and from the student's perspective, their opinion of the tool and their motivation to learn more about the aspects of 3D reality. We present a pilot study that compared the results of improvement in spatial ability acquired by freshman engineering students also a survey of satisfaction and motivation of the methodology and technology used.", "fno": "06684848", "keywords": [ "Training", "Portable Document Format", "Three Dimensional Displays", "Augmented Reality", "Materials", "Virtual Reality", "Engineering Education", "Spatial Skills", "Training Courses", "Augmented Reality" ], "authors": [ { "affiliation": "Dipt. Expresion Grafica en Arquitectura e Ing., Univ. de La Laguna, La Laguna, Spain", "fullName": "Jorge Martin-Gutierrez", "givenName": "Jorge", "surname": "Martin-Gutierrez", "__typename": "ArticleAuthorType" }, { "affiliation": "Dipt. Cartografia y Expresion Grafica en la Ing., Univ. de Las Palmas de Gran Canaria, Las Palmas de Gran Canaria, Spain", "fullName": "Melchor Garcia-Dominguez", "givenName": "Melchor", "surname": "Garcia-Dominguez", "__typename": "ArticleAuthorType" }, { "affiliation": "Dipt. Cartografia y Expresion Grafica en la Ing., Univ. 
de Las Palmas de Gran Canaria, Las Palmas de Gran Canaria, Spain", "fullName": "Cristina Roca Gonzalez", "givenName": "Cristina", "surname": "Roca Gonzalez", "__typename": "ArticleAuthorType" }, { "affiliation": "Dipt. Cartografia y Expresion Grafica en la Ing., Univ. de Las Palmas de Gran Canaria, Las Palmas de Gran Canaria, Spain", "fullName": "M. C. Mato Corredeguas", "givenName": "M. C. Mato", "surname": "Corredeguas", "__typename": "ArticleAuthorType" } ], "idPrefix": "fie", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-10-01T00:00:00", "pubType": "proceedings", "pages": "362-368", "year": "2013", "issn": "0190-5848", "isbn": "978-1-4673-5261-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06684847", "articleId": "12OmNwkR5tv", "__typename": "AdjacentArticleType" }, "next": { "fno": "06684849", "articleId": "12OmNyp9MlR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/enase/2015/143/0/07320341", "title": "Using personality traits and a spatial ability test to identify talented aspiring designers in User-Centred Design methodologies", "doi": null, "abstractUrl": "/proceedings-article/enase/2015/07320341/12OmNAGepUw", "parentPublication": { "id": "proceedings/enase/2015/143/0", "title": "2015 International Conference on Evaluation of Novel Approaches to Software Engineering (ENASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2012/1353/0/06462349", "title": "Predictive models on improvement of spatial abilities in controlled training", "doi": null, "abstractUrl": "/proceedings-article/fie/2012/06462349/12OmNAo45Pb", "parentPublication": { "id": "proceedings/fie/2012/1353/0", "title": "2012 Frontiers in Education Conference Proceedings", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2014/3922/0/07044193", "title": "From 2D to 3D: Teaching terrain representation in engineering studies through Augmented reality: Comparative versus 3D pdf", "doi": null, "abstractUrl": "/proceedings-article/fie/2014/07044193/12OmNxwENpC", "parentPublication": { "id": "proceedings/fie/2014/3922/0", "title": "2014 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/latice/2015/9967/0/9967a221", "title": "Learning Geometry with Augmented Reality to Enhance Spatial Ability", "doi": null, "abstractUrl": "/proceedings-article/latice/2015/9967a221/12OmNy5zsoL", "parentPublication": { "id": "proceedings/latice/2015/9967/0", "title": "2015 International Conference on Learning and Teaching in Computing and Engineering (LaTiCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2012/1353/0/06462312", "title": "Development of an augmented reality based remedial course to improve the spatial ability of engineering students", "doi": null, "abstractUrl": "/proceedings-article/fie/2012/06462312/12OmNzXWZGP", "parentPublication": { "id": "proceedings/fie/2012/1353/0", "title": "2012 Frontiers in Education Conference Proceedings", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2010/4055/0/4055a133", "title": "AR_Dehaes: An Educational Toolkit Based on Augmented Reality Technology for Learning Engineering Graphics", "doi": null, "abstractUrl": "/proceedings-article/icalt/2010/4055a133/12OmNzwZ6nk", "parentPublication": { "id": "proceedings/icalt/2010/4055/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08258112", "title": "On the improvement of classifying EEG 
recordings using neural networks", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258112/17D45VN31gm", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08258319", "title": "A generalized incremental bottom-up community detection framework for highly dynamic graphs", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258319/17D45VObpOx", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08258379", "title": "Comparison of different driving style analysis approaches based on trip segmentation over GPS information", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258379/17D45WHONmx", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/laclo/2021/2358/0/235800a350", "title": "Interactive ENEM: exams with statistics and free access", "doi": null, "abstractUrl": "/proceedings-article/laclo/2021/235800a350/1BzW0yIdnDq", "parentPublication": { "id": "proceedings/laclo/2021/2358/0", "title": "2021 XVI Latin American Conference on Learning Technologies (LACLO)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwcl7Kf", "title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNyVes7B", "doi": "10.1109/ISMAR.2011.6143898", "title": "Comparing spatial understanding between touch-based and AR-style interaction", "normalizedTitle": "Comparing spatial understanding between touch-based and AR-style interaction", "abstract": "There are currently two primary ways of viewing location specific information in-situ on hand-held mobile device screens: using a see-through augmented reality interface and using a touch-based interface with panoramas. The two approaches use fundamentally different interaction metaphors: an AR-style of interacting where the user holds up the device and physically moves it to change views of the world, and a touch-based technique where panorama navigation is independent of the physical world. We have investigated how this difference in interaction technique impacts a user's spatial understanding of the mixed reality world. Our study found that AR-style interaction provided better spatial understanding overall, while touch-based interaction changed the experience to have more similar characteristics to interaction in a separate virtual environment.", "abstracts": [ { "abstractType": "Regular", "content": "There are currently two primary ways of viewing location specific information in-situ on hand-held mobile device screens: using a see-through augmented reality interface and using a touch-based interface with panoramas. The two approaches use fundamentally different interaction metaphors: an AR-style of interacting where the user holds up the device and physically moves it to change views of the world, and a touch-based technique where panorama navigation is independent of the physical world. 
We have investigated how this difference in interaction technique impacts a user's spatial understanding of the mixed reality world. Our study found that AR-style interaction provided better spatial understanding overall, while touch-based interaction changed the experience to have more similar characteristics to interaction in a separate virtual environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "There are currently two primary ways of viewing location specific information in-situ on hand-held mobile device screens: using a see-through augmented reality interface and using a touch-based interface with panoramas. The two approaches use fundamentally different interaction metaphors: an AR-style of interacting where the user holds up the device and physically moves it to change views of the world, and a touch-based technique where panorama navigation is independent of the physical world. We have investigated how this difference in interaction technique impacts a user's spatial understanding of the mixed reality world. 
Our study found that AR-style interaction provided better spatial understanding overall, while touch-based interaction changed the experience to have more similar characteristics to interaction in a separate virtual environment.", "fno": "06162913", "keywords": [ "Visualization", "Virtual Environments", "Analysis Of Variance", "Green Products", "Cameras", "Augmented Reality", "Estimation" ], "authors": [ { "affiliation": "Nokia Research Center", "fullName": "Jason Wither", "givenName": "Jason", "surname": "Wither", "__typename": "ArticleAuthorType" }, { "affiliation": "Nokia Research Center", "fullName": "Sean White", "givenName": "Sean", "surname": "White", "__typename": "ArticleAuthorType" }, { "affiliation": "Nokia Research Center", "fullName": "Ronald Azuma", "givenName": "Ronald", "surname": "Azuma", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-10-01T00:00:00", "pubType": "proceedings", "pages": "273-274", "year": "2011", "issn": null, "isbn": "978-1-4577-2183-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06162912", "articleId": "12OmNBoNrp5", "__typename": "AdjacentArticleType" }, "next": { "fno": "06162914", "articleId": "12OmNAoUT6D", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icppw/2011/4511/0/4511a063", "title": "AR-Based Positioning for Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/icppw/2011/4511a063/12OmNwwuE0H", "parentPublication": { "id": "proceedings/icppw/2011/4511/0", "title": "2011 40th International Conference on Parallel Processing Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-amh/2012/4663/0/06483997", "title": "GeoBoids: A mobile AR application for exergaming", 
"doi": null, "abstractUrl": "/proceedings-article/ismar-amh/2012/06483997/12OmNxuXcCh", "parentPublication": { "id": "proceedings/ismar-amh/2012/4663/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460025", "title": "Indirect touch manipulation for interaction with stereoscopic displays", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460025/12OmNxwENpw", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549376", "title": "Touch experience in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549376/12OmNy2agRt", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671786", "title": "Interaction techniques for HMD-HHD hybrid AR systems", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671786/12OmNyxFKaD", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/02/mcg2015020052", "title": "Legibility in Industrial AR: Text Style, Color Coding, and Illuminance", "doi": null, "abstractUrl": "/magazine/cg/2015/02/mcg2015020052/13rRUxjQyxG", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a185", "title": "AI for 
Toggling the Linearity of Interactions in AR", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a185/17D45Wda7fy", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699278", "title": "Hybrid UIs for Music Exploration in AR and VR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699278/19F1NJTrBfi", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797993", "title": "VirtualTablet: Extending Movable Surfaces with Touch Interaction", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797993/1cJ1hgQ4Li8", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a324", "title": "Towards In-situ Authoring of AR Visualizations with Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a324/1yeQJrGq6WI", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrMHOd6", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "acronym": "hicss", "groupId": "1000730", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNyvoXmR", "doi": "10.1109/HICSS.2016.695", "title": "Using Multimedia Presentations to Enhance the Judiciary's Technical Understanding of Digital Forensic Concepts: An Indonesian Case Study", "normalizedTitle": "Using Multimedia Presentations to Enhance the Judiciary's Technical Understanding of Digital Forensic Concepts: An Indonesian Case Study", "abstract": "Members of the judiciary and law enforcement agencies need to understand digital forensics in order to determine the admissibility of, and to effectively present, digital evidence in a court. In this paper, we examine the use of multimedia presentations to improve participants' understanding of particular terms and concepts that commonly arise in digital forensic investigations. A questionnaire-based survey was conducted using a convenient sample of judges, investigators, prosecutors and staff from three provinces in Indonesia. We compared the participants' understanding of three technical terms: mobile forensics, time zones, and hashing, before and after watching three educational videos on the respective topics. The results showed that all participants had an increased level of understanding after viewing the educational videos. The participants also provided useful feedback that can be used as a guide for improved design decisions in future multimedia-based training.", "abstracts": [ { "abstractType": "Regular", "content": "Members of the judiciary and law enforcement agencies need to understand digital forensics in order to determine the admissibility of, and to effectively present, digital evidence in a court. 
In this paper, we examine the use of multimedia presentations to improve participants' understanding of particular terms and concepts that commonly arise in digital forensic investigations. A questionnaire-based survey was conducted using a convenient sample of judges, investigators, prosecutors and staff from three provinces in Indonesia. We compared the participants' understanding of three technical terms: mobile forensics, time zones, and hashing, before and after watching three educational videos on the respective topics. The results showed that all participants had an increased level of understanding after viewing the educational videos. The participants also provided useful feedback that can be used as a guide for improved design decisions in future multimedia-based training.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Members of the judiciary and law enforcement agencies need to understand digital forensics in order to determine the admissibility of, and to effectively present, digital evidence in a court. In this paper, we examine the use of multimedia presentations to improve participants' understanding of particular terms and concepts that commonly arise in digital forensic investigations. A questionnaire-based survey was conducted using a convenient sample of judges, investigators, prosecutors and staff from three provinces in Indonesia. We compared the participants' understanding of three technical terms: mobile forensics, time zones, and hashing, before and after watching three educational videos on the respective topics. The results showed that all participants had an increased level of understanding after viewing the educational videos. 
The participants also provided useful feedback that can be used as a guide for improved design decisions in future multimedia-based training.", "fno": "5670f617", "keywords": [ "Multimedia Communication", "Digital Forensics", "Training", "Videos", "Law Enforcement", "Electronic Evidence", "Digital Forensics", "Technical Comprehension", "Multimedia Presentations", "Digital Forensic Training" ], "authors": [ { "affiliation": null, "fullName": "Niken Dwi Wahyu Cahyani", "givenName": "Niken Dwi Wahyu", "surname": "Cahyani", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ben Martini", "givenName": "Ben", "surname": "Martini", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kim-Kwang Raymond Choo", "givenName": "Kim-Kwang Raymond", "surname": "Choo", "__typename": "ArticleAuthorType" } ], "idPrefix": "hicss", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2016-01-01T00:00:00", "pubType": "proceedings", "pages": "5617-5626", "year": "2016", "issn": "1530-1605", "isbn": "978-0-7695-5670-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5670f607", "articleId": "12OmNx57HMU", "__typename": "AdjacentArticleType" }, "next": { "fno": "5670f627", "articleId": "12OmNwEJ0SL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363390", "title": "Investigating Big Data Healthcare Security Issues with Raspberry Pi", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363390/12OmNBInLnB", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing 
(CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2011/9618/0/05719005", "title": "Bridging Differences in Digital Forensics for Law Enforcement and National Security", "doi": null, "abstractUrl": "/proceedings-article/hicss/2011/05719005/12OmNqGRGbz", "parentPublication": { "id": "proceedings/hicss/2011/9618/0", "title": "2011 44th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/spw/2014/5103/0/5103a057", "title": "DF-C2M2: A Capability Maturity Model for Digital Forensics Organisations", "doi": null, "abstractUrl": "/proceedings-article/spw/2014/5103a057/12OmNwCsdIH", "parentPublication": { "id": "proceedings/spw/2014/5103/0", "title": "2014 IEEE Security and Privacy Workshops (SPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcsw/2012/4686/0/4686a542", "title": "When Digital Forensic Research Meets Laws", "doi": null, "abstractUrl": "/proceedings-article/icdcsw/2012/4686a542/12OmNwdbVdv", "parentPublication": { "id": "proceedings/icdcsw/2012/4686/0", "title": "2012 32nd International Conference on Distributed Computing Systems Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aidm/2006/2730/0/27300058", "title": "Using Coupled Hidden Markov Models to Model Suspect Interactions in Digital Forensic Analysis", "doi": null, "abstractUrl": "/proceedings-article/aidm/2006/27300058/12OmNwwd2U2", "parentPublication": { "id": "proceedings/aidm/2006/2730/0", "title": "2006 First International Workshop on Integrating AI and Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-nier/2018/5662/0/566201a009", "title": "Towards Forensic-Ready Software Systems", "doi": null, "abstractUrl": 
"/proceedings-article/icse-nier/2018/566201a009/13bd1rsER1j", "parentPublication": { "id": "proceedings/icse-nier/2018/5662/0", "title": "2018 IEEE/ACM 40th International Conference on Software Engineering: New Ideas and Emerging Technologies Results (ICSE-NIER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom-bigdatase-icess/2017/4906/0/08029505", "title": "Volkswagen Car Entertainment System Forensics", "doi": null, "abstractUrl": "/proceedings-article/trustcom-bigdatase-icess/2017/08029505/17D45WXIkGf", "parentPublication": { "id": "proceedings/trustcom-bigdatase-icess/2017/4906/0", "title": "2017 IEEE Trustcom/BigDataSE/ICESS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0/08276850", "title": "A Forensic Investigation of the Robot Operating System", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2017/08276850/17D45Xcttnk", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0", "title": "2017 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/sp/2019/01/08674170", "title": "Recent Advancements in Digital Forensics, Part 2", "doi": null, "abstractUrl": "/magazine/sp/2019/01/08674170/18GGoSAPeg0", "parentPublication": { "id": "mags/sp", "title": "IEEE Security & Privacy", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edocw/2021/4488/0/448800a067", "title": "AFES: An Advanced Forensic Evidence System", "doi": null, "abstractUrl": "/proceedings-article/edocw/2021/448800a067/1yZ5AifH9sY", "parentPublication": { "id": 
"proceedings/edocw/2021/4488/0", "title": "2021 IEEE 25th International Enterprise Distributed Object Computing Workshop (EDOCW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1AH82uivdwA", "title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)", "acronym": "icbase", "groupId": "1841125", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1AH8dc1M6HK", "doi": "10.1109/ICBASE53849.2021.00027", "title": "Construction and Research of Virtual Forest Environment Based on Spatial Data", "normalizedTitle": "Construction and Research of Virtual Forest Environment Based on Spatial Data", "abstract": "Virtual Forest environment is a virtual geographical environment in the application of scientific forestry practices; it is a combination of forestry science and virtual geographical environment. Based on forestry spatial data and virtual reality technology, it constructs forest objects and expresses extremely complex forest phenomena. This article introduces how to create a virtual environment object of forest, forest-related technology, the virtual environments research, and pointes out the importance of creating a virtual forest environment.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual Forest environment is a virtual geographical environment in the application of scientific forestry practices; it is a combination of forestry science and virtual geographical environment. Based on forestry spatial data and virtual reality technology, it constructs forest objects and expresses extremely complex forest phenomena. This article introduces how to create a virtual environment object of forest, forest-related technology, the virtual environments research, and pointes out the importance of creating a virtual forest environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Forest environment is a virtual geographical environment in the application of scientific forestry practices; it is a combination of forestry science and virtual geographical environment. 
Based on forestry spatial data and virtual reality technology, it constructs forest objects and expresses extremely complex forest phenomena. This article introduces how to create a virtual environment object of forest, forest-related technology, the virtual environments research, and pointes out the importance of creating a virtual forest environment.", "fno": "270900a108", "keywords": [ "Forestry", "Geographic Information Systems", "Virtual Reality", "Virtual Forest Environment", "Virtual Geographical Environment", "Scientific Forestry Practices", "Forestry Spatial Data", "Virtual Reality Technology", "Forest Objects", "Forest Phenomena", "Forest Related Technology", "Virtual Environments Research", "Visualization", "Virtual Environments", "Data Visualization", "Forestry", "Vegetation", "Spatial Databases", "Planning", "Virtual Forest Environment", "Landscape", "Stands Forestry Spatial Data", "Forest Objects" ], "authors": [ { "affiliation": "YunNan Open University,School of Media and Information Engineering,Kunming,China", "fullName": "Yuan Yang", "givenName": "Yuan", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icbase", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-09-01T00:00:00", "pubType": "proceedings", "pages": "108-111", "year": "2021", "issn": null, "isbn": "978-1-6654-2709-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "270900a101", "articleId": "1AH88k9nh5K", "__typename": "AdjacentArticleType" }, "next": { "fno": "270900a112", "articleId": "1AH8fBX6ncs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isvri/2011/0054/0/05759637", "title": "Realistic real-time rendering for large-scale forest scenes", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759637/12OmNqH9hks", "parentPublication": { 
"id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icecs/2009/3937/0/3937a081", "title": "Computer Design and Simulation of Built Environment; Application to Forest", "doi": null, "abstractUrl": "/proceedings-article/icecs/2009/3937a081/12OmNqHqSuB", "parentPublication": { "id": "proceedings/icecs/2009/3937/0", "title": "Environmental and Computer Science, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esiat/2009/3682/3/3682c073", "title": "Changes of Forest Landscape Based on Historical Management in Northeastern China", "doi": null, "abstractUrl": "/proceedings-article/esiat/2009/3682c073/12OmNxRF6VX", "parentPublication": { "id": "proceedings/esiat/2009/3682/3", "title": "Environmental Science and Information Application Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifcsta/2009/3930/1/3930a072", "title": "Study of Constructing the Forestry Spatial Information Service System Based on SOA Technology", "doi": null, "abstractUrl": "/proceedings-article/ifcsta/2009/3930a072/12OmNyeECFO", "parentPublication": { "id": "proceedings/ifcsta/2009/3930/3", "title": "Computer Science-Technology and Applications, International Forum on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinis/2013/2809/0/2809a296", "title": "Application of Hani People's Traditional Knowledge on Forest Management", "doi": null, "abstractUrl": "/proceedings-article/icinis/2013/2809a296/12OmNzwpUmZ", "parentPublication": { "id": "proceedings/icinis/2013/2809/0", "title": "2013 6th International Conference on Intelligent Networks and Intelligent Systems (ICINIS)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/csii/2018/7875/0/787501a127", "title": "Development of Forest Information Management DB System Considering Ease of Use", "doi": null, "abstractUrl": "/proceedings-article/csii/2018/787501a127/13xI8B5Z80r", "parentPublication": { "id": "proceedings/csii/2018/7875/0", "title": "2018 5th International Conference on Computational Science/Intelligence and Applied Informatics (CSII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwecai/2022/7997/0/799700a550", "title": "Precipitation forecast model of three forest areas based on polynomial and neural network", "doi": null, "abstractUrl": "/proceedings-article/iwecai/2022/799700a550/1CugtBN0qEU", "parentPublication": { "id": "proceedings/iwecai/2022/7997/0", "title": "2022 3rd International Conference on Electronic Communication and Artificial Intelligence (IWECAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icict/2020/7283/0/728300a091", "title": "Simulate Forest Trees by Integrating L-System and 3D CAD Files", "doi": null, "abstractUrl": "/proceedings-article/icict/2020/728300a091/1jPb7TIJWXm", "parentPublication": { "id": "proceedings/icict/2020/7283/0", "title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbase/2020/9619/0/961900a068", "title": "Study on visualization of forest fire spread based on ArcGIS", "doi": null, "abstractUrl": "/proceedings-article/icbase/2020/961900a068/1t2nzsH8UZG", "parentPublication": { "id": "proceedings/icbase/2020/9619/0", "title": "2020 International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciddt/2020/0367/0/036700a165", "title": "Immersive 
Virtual Experience Design Research of Jiaoshan Forest of Steles Calligraphy Art", "doi": null, "abstractUrl": "/proceedings-article/iciddt/2020/036700a165/1wutBUZIcvu", "parentPublication": { "id": "proceedings/iciddt/2020/0367/0", "title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJbEwHHqEg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJch0MXduw", "doi": "10.1109/VR51125.2022.00093", "title": "Spatial Updating in Virtual Reality &#x2013; Auditory and Visual Cues in a Cave Automatic Virtual Environment", "normalizedTitle": "Spatial Updating in Virtual Reality – Auditory and Visual Cues in a Cave Automatic Virtual Environment", "abstract": "When we move through a real environment, egocentric location representations are effortlessly and automatically updated. While moving in synthetic environments, this effortless, continuous spatial updating is often disrupted or incomplete due to a lack of sensory, especially body-based, movement information. To prevent disorientation in virtual reality caused by missing body-based information, the support of spatial updating via other sensory movement cues is necessary. In the presented experiment, participants performed a spatial updating task in a sparse virtual scene presented inside a CAVE (Cave Automatic Virtual Environment). The task was to navigate back to a starting position after simulated movements with either no orientation cues, three visible distant landmarks or one continuous auditory cue present. The focus was not to compare visual and auditory cues but to explore the viability of auditory cueing with visual cues as a reference. Overall, the data showed improved task performance when an orientation cue was present, with auditory cues providing at least as much improvement as visual cues. 
Our results indicate that auditory cues in virtual environments can support spatial updating when body-based information is missing.", "abstracts": [ { "abstractType": "Regular", "content": "When we move through a real environment, egocentric location representations are effortlessly and automatically updated. While moving in synthetic environments, this effortless, continuous spatial updating is often disrupted or incomplete due to a lack of sensory, especially body-based, movement information. To prevent disorientation in virtual reality caused by missing body-based information, the support of spatial updating via other sensory movement cues is necessary. In the presented experiment, participants performed a spatial updating task in a sparse virtual scene presented inside a CAVE (Cave Automatic Virtual Environment). The task was to navigate back to a starting position after simulated movements with either no orientation cues, three visible distant landmarks or one continuous auditory cue present. The focus was not to compare visual and auditory cues but to explore the viability of auditory cueing with visual cues as a reference. Overall, the data showed improved task performance when an orientation cue was present, with auditory cues providing at least as much improvement as visual cues. Our results indicate that auditory cues in virtual environments can support spatial updating when body-based information is missing.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "When we move through a real environment, egocentric location representations are effortlessly and automatically updated. While moving in synthetic environments, this effortless, continuous spatial updating is often disrupted or incomplete due to a lack of sensory, especially body-based, movement information. To prevent disorientation in virtual reality caused by missing body-based information, the support of spatial updating via other sensory movement cues is necessary. 
In the presented experiment, participants performed a spatial updating task in a sparse virtual scene presented inside a CAVE (Cave Automatic Virtual Environment). The task was to navigate back to a starting position after simulated movements with either no orientation cues, three visible distant landmarks or one continuous auditory cue present. The focus was not to compare visual and auditory cues but to explore the viability of auditory cueing with visual cues as a reference. Overall, the data showed improved task performance when an orientation cue was present, with auditory cues providing at least as much improvement as visual cues. Our results indicate that auditory cues in virtual environments can support spatial updating when body-based information is missing.", "fno": "961700a719", "keywords": [ "Data Visualisation", "Virtual Reality", "CAVE", "Sparse Virtual Scene", "Sensory Movement Cues", "Body Based Information", "Movement Information", "Cave Automatic Virtual Environment", "Virtual Reality", "Spatial Updating", "Visual Cue", "Auditory Cue", "Visualization", "Three Dimensional Displays", "Navigation", "Conferences", "Virtual Environments", "User Interfaces", "Task Analysis", "Spatial Updating", "Virtual Reality", "Auditory And Visual Cues", "Triangle Completion Task", "CAVE", "Spatial Orientation" ], "authors": [ { "affiliation": "Chemnitz University of Technology,Institute of Physics", "fullName": "Christiane Breitkreutz", "givenName": "Christiane", "surname": "Breitkreutz", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Jennifer Brade", "givenName": "Jennifer", "surname": "Brade", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Sven Winkler", "givenName": "Sven", "surname": "Winkler", "__typename": "ArticleAuthorType" }, { 
"affiliation": "Chemnitz University of Technology,Cognitive Systems Lab, Institute of Physics", "fullName": "Alexandra Bendixen", "givenName": "Alexandra", "surname": "Bendixen", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz University of Technology,Institute for Machine Tools and Production Processes", "fullName": "Philipp Klimant", "givenName": "Philipp", "surname": "Klimant", "__typename": "ArticleAuthorType" }, { "affiliation": "Chemnitz University of Technology,Professorship of Applied Geropsychology and Cognition", "fullName": "Georg Jahn", "givenName": "Georg", "surname": "Jahn", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "719-727", "year": "2022", "issn": null, "isbn": "978-1-6654-9617-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "961700a711", "articleId": "1CJc667vUJ2", "__typename": "AdjacentArticleType" }, "next": { "fno": "961700a728", "articleId": "1CJc8kd55YY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223342", "title": "Wayfinding by auditory cues in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223342/12OmNB8TU5g", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802057", "title": "Design and evaluation of Binaural auditory rendering for CAVEs", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802057/12OmNCctfc5", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/sive/2014/5781/0/07006282", "title": "Audio-visual attractors for capturing attention to the screens when walking in CAVE systems", "doi": null, "abstractUrl": "/proceedings-article/sive/2014/07006282/12OmNvT2oZl", "parentPublication": { "id": "proceedings/sive/2014/5781/0", "title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1995/7084/0/70840074", "title": "Presence in virtual environments as a function of visual and auditory cues", "doi": null, "abstractUrl": "/proceedings-article/vrais/1995/70840074/12OmNzlUKP6", "parentPublication": { "id": "proceedings/vrais/1995/7084/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2005/02/u2080", "title": "Navigation with Auditory Cues in a Virtual Environment", "doi": null, "abstractUrl": "/magazine/mu/2005/02/u2080/13rRUwInvi2", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/04/07384536", "title": "Examining Rotation Gain in CAVE-like Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2016/04/07384536/13rRUxOdD2H", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a299", "title": "Visualized Cues for Enhancing Spatial Ability Training in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a299/1CJfbuK0Yfe", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998297", "title": "Teleporting through virtual environments: Effects of path scale and environment scale on spatial updating", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998297/1hrXhk9mu9W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a639", "title": "Visual-Auditory Redirection: Multimodal Integration of Incongruent Visual and Auditory Cues for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a639/1pysvxeFG4E", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a391", "title": "The Effectiveness of Locomotion Interfaces Depends on Self-Motion Cues, Environmental Cues, and the Individual", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a391/1tnXFgLAfSw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7WxlL5W6Y", "doi": "10.1109/ISMAR-Adjunct57072.2022.00050", "title": "Designing, Prototyping and Testing of <tex>$360^{\\circ}$</tex> Spatial Audio Conferencing for Virtual Tours", "normalizedTitle": "Designing, Prototyping and Testing of - Spatial Audio Conferencing for Virtual Tours", "abstract": "In this paper, we describe the process of designing, prototyping and testing spatial audio interfaces for supporting communication between people viewing <tex>$360^{\\circ}$</tex> images during a virtual tour. When several people are viewing the same <tex>$360^{\\circ}$</tex> image in Virtual Reality (VR) headsets and sharing live audio, it can be difficult to understand them when they are talking at the same time. However, spatial audio could be used to clearly disambiguate between the different speakers. We introduce the concept of Attention-Based Spatial Audio; playing spatial audio from the direction where the speaker is paying attention to. Results indicate statistically significant difference between Attention-Based Spatial Audio and standard spatial audio in terms of cognitive load, social presence, and vocal intelligibility.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we describe the process of designing, prototyping and testing spatial audio interfaces for supporting communication between people viewing <tex>$360^{\\circ}$</tex> images during a virtual tour. When several people are viewing the same <tex>$360^{\\circ}$</tex> image in Virtual Reality (VR) headsets and sharing live audio, it can be difficult to understand them when they are talking at the same time. 
However, spatial audio could be used to clearly disambiguate between the different speakers. We introduce the concept of Attention-Based Spatial Audio; playing spatial audio from the direction where the speaker is paying attention to. Results indicate statistically significant difference between Attention-Based Spatial Audio and standard spatial audio in terms of cognitive load, social presence, and vocal intelligibility.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we describe the process of designing, prototyping and testing spatial audio interfaces for supporting communication between people viewing - images during a virtual tour. When several people are viewing the same - image in Virtual Reality (VR) headsets and sharing live audio, it can be difficult to understand them when they are talking at the same time. However, spatial audio could be used to clearly disambiguate between the different speakers. We introduce the concept of Attention-Based Spatial Audio; playing spatial audio from the direction where the speaker is paying attention to. 
Results indicate statistically significant difference between Attention-Based Spatial Audio and standard spatial audio in terms of cognitive load, social presence, and vocal intelligibility.", "fno": "536500a223", "keywords": [ "Audio Signal Processing", "Cognition", "Groupware", "Teleconferencing", "Virtual Reality", "Attention Based Spatial Audio", "Sharing Live Audio", "Spatial Audio Conferencing", "Spatial Audio Interfaces", "Standard Spatial Audio", "Virtual Reality Headsets", "Virtual Tour", "Virtual Tours", "Headphones", "Spatial Audio", "Cognitive Load", "Standards", "Augmented Reality", "Testing" ], "authors": [ { "affiliation": "University of Auckland", "fullName": "Alaeddin Nassani", "givenName": "Alaeddin", "surname": "Nassani", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Auckland", "fullName": "Amit Barde", "givenName": "Amit", "surname": "Barde", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Auckland", "fullName": "Huidong Bai", "givenName": "Huidong", "surname": "Bai", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Auckland", "fullName": "Suranga Nanayakkara", "givenName": "Suranga", "surname": "Nanayakkara", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Auckland", "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "223-227", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a222", "articleId": "1J7WgPd6kLe", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a233", "articleId": "1J7Wl00rMo8", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmew/2022/7218/0/09859426", "title": "Perceptual Evaluation on Audio-Visual Dataset of 360 Content", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859426/1G4F4CwHspq", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/05/09874253", "title": "BiFuse++: Self-Supervised and Efficient Bi-Projection Fusion for 360&#x00B0; Depth Estimation", "doi": null, "abstractUrl": "/journal/tp/2023/05/09874253/1Gjwzjh5yhi", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f667", "title": "SphereSR: <tex>Z_$360^{\\circ}$_Z</tex> Image Super-Resolution with Arbitrary Projection via Continuous Spherical Image Representation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f667/1H1mQNFEXEQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a491", "title": "Implementation of Attention-Based Spatial Audio for 360&#x00B0; Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a491/1J7Wlf9IrNC", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/10004009", "title": "Macrotile: Toward QoE-Aware and Energy-Efficient 
360-Degree Video Streaming", "doi": null, "abstractUrl": "/journal/tm/5555/01/10004009/1JwLoRnPwAg", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a648", "title": "Scaling VR Video Conferencing", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a648/1MNgNl27IME", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a345", "title": "A QoE and Visual Attention Evaluation on the Influence of Spatial Audio in 360 Videos", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a345/1qpzDaHLzhu", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09497715", "title": "Spherical DNNs and Their Applications in 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> Images and Videos", "doi": null, "abstractUrl": "/journal/tp/2022/10/09497715/1vzY9kuYnwA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09541093", "title": "Learning Spherical Convolution for <inline-formula><tex-math notation=\"LaTeX\">Z_$360^{\\circ }$_Z</tex-math></inline-formula> Recognition", "doi": null, "abstractUrl": "/journal/tp/2022/11/09541093/1x3fMiX57S8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09672741", "title": "Multisensory 360&#x00B0; Videos Under Varying Resolution Levels Enhance Presence", "doi": null, "abstractUrl": "/journal/tg/2023/04/09672741/1zWzJCeaeGc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ187OlfvG", "doi": "10.1109/VR.2019.8797679", "title": "Shooter Bias and Socioeconomic Status in Virtual Reality", "normalizedTitle": "Shooter Bias and Socioeconomic Status in Virtual Reality", "abstract": "This poster details an ongoing experiment aiming to test the prevalence of shooter bias in virtual reality. Further, we examine the interaction between shooter bias and perceived socioeconomic status. Even though shooter bias is a well-documented topic in psychology research, little experimentation has used virtual reality, instead opting for unrealistic two-dimensional simulations. This study will yield new insight into shooter bias, especially concerning virtual reality as a tool for use in future work, and will provide new understanding about the relationship between socioeconomic status and shooter bias.", "abstracts": [ { "abstractType": "Regular", "content": "This poster details an ongoing experiment aiming to test the prevalence of shooter bias in virtual reality. Further, we examine the interaction between shooter bias and perceived socioeconomic status. Even though shooter bias is a well-documented topic in psychology research, little experimentation has used virtual reality, instead opting for unrealistic two-dimensional simulations. This study will yield new insight into shooter bias, especially concerning virtual reality as a tool for use in future work, and will provide new understanding about the relationship between socioeconomic status and shooter bias.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This poster details an ongoing experiment aiming to test the prevalence of shooter bias in virtual reality. 
Further, we examine the interaction between shooter bias and perceived socioeconomic status. Even though shooter bias is a well-documented topic in psychology research, little experimentation has used virtual reality, instead opting for unrealistic two-dimensional simulations. This study will yield new insight into shooter bias, especially concerning virtual reality as a tool for use in future work, and will provide new understanding about the relationship between socioeconomic status and shooter bias.", "fno": "08797679", "keywords": [ "Human Factors", "Psychology", "Social Aspects Of Automation", "Virtual Reality", "Shooter Bias", "Socioeconomic Status", "Virtual Reality", "Psychology Research", "Avatars", "Psychology", "Solid Modeling", "Games", "Weapons", "Law Enforcement", "Shooter Bias X 2014 Immersion X 2014 Presence X 2014 Video Games Violence X 2014 Racial Bias X 2014 Threat Perception" ], "authors": [ { "affiliation": "Davidson College", "fullName": "Evan Blanpied", "givenName": "Evan", "surname": "Blanpied", "__typename": "ArticleAuthorType" }, { "affiliation": "Davidson College", "fullName": "Jessica Good", "givenName": "Jessica", "surname": "Good", "__typename": "ArticleAuthorType" }, { "affiliation": "Davidson College", "fullName": "Tabitha Peck", "givenName": "Tabitha", "surname": "Peck", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "856-857", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798165", "articleId": "1cJ1eZOh6wM", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798214", "articleId": "1cJ0QSLRO6Y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/ismar/2010/9343/0/05643620", "title": "AR Shooter: An augmented reality shooting game system", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643620/12OmNyrIavz", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2015/02/06922494", "title": "Adaptive Shooting for Bots in First Person Shooter Games Using Reinforcement Learning", "doi": null, "abstractUrl": "/journal/ci/2015/02/06922494/13rRUwInvnn", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2018/9605/0/960500a067", "title": "Controlling First-Person Character Movement: A Low-Cost Camera-based Tracking Alternative for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2018/960500a067/17D45WK5AqA", "parentPublication": { "id": "proceedings/sbgames/2018/9605/0", "title": "2018 17th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699208", "title": "Demonstrating Emotion Sharing and Augmentation in Cooperative Virtual Reality Games", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699208/19F1PYyd2De", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlbdbi/2021/1790/0/179000a608", "title": "Research Status of the Application of Virtual Reality Technology on Self-efficacy", "doi": null, "abstractUrl": 
"/proceedings-article/mlbdbi/2021/179000a608/1BQiyLVBsY0", "parentPublication": { "id": "proceedings/mlbdbi/2021/1790/0", "title": "2021 3rd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798008", "title": "[DC] Embodied Virtual Avatars and Potential Negative Effects on Implicit Racial Bias", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798008/1cJ0WBlYR7G", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090416", "title": "Shooter Bias in Virtual Reality: The Effect of Avatar Race and Socioeconomic Status on Shooting Decisions", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090416/1jIxANOupNK", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382876", "title": "Evidence of Racial Bias Using Immersive Virtual Reality: Analysis of Head and Hand Motions During Shooting Decisions", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382876/1saZsrqdHJm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a023", "title": "Design and Simulation of Next-Generation Augmented Reality User Interfaces in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a023/1tnXWEmiofK", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE 
Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichci/2020/2316/0/231600a071", "title": "Research status of depression related to virtual reality technology in China", "doi": null, "abstractUrl": "/proceedings-article/ichci/2020/231600a071/1tuAaiSt2Gk", "parentPublication": { "id": "proceedings/ichci/2020/2316/0", "title": "2020 International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1e5ZpIoqcVi", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "acronym": "vs-games", "groupId": "1002788", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1e5Zs0bUtxu", "doi": "10.1109/VS-Games.2019.8864599", "title": "Development of a Directed Teleport Function for Immersive Training in Virtual Reality", "normalizedTitle": "Development of a Directed Teleport Function for Immersive Training in Virtual Reality", "abstract": "Recent advances in Virtual Reality (VR) technology have contributed to the development of immersive applications for training and simulation. VR serious games can be utilized to support and supplement traditional training. In VR-based training environments, the player can make mistakes without serious consequences to gather experiences which help them to make better decisions in the future. However, it is difficult to guide the player's attention throughout the VR game, since the player has the freedom to look anywhere anytime. Thus, many game designers create linear and restrictive experiences. In this paper, we develop a dynamic story and guide the player's attention to the specific game elements. To this end, we propose a novel directed teleport function to show points of interest to the player. We evaluate the effect of the proposed function by conducting a user study among two groups of 20 participants: one group can use only the common teleport function, whereas the other group can additionally use the directed teleport function. The results of our study indicate that the directed teleport function is very effective, has a positive effect on the orientation, and is very easy to use. 
In particular, the directed teleport function not only helps the player to navigate through the virtual world but also reveals Interesting points.", "abstracts": [ { "abstractType": "Regular", "content": "Recent advances in Virtual Reality (VR) technology have contributed to the development of immersive applications for training and simulation. VR serious games can be utilized to support and supplement traditional training. In VR-based training environments, the player can make mistakes without serious consequences to gather experiences which help them to make better decisions in the future. However, it is difficult to guide the player's attention throughout the VR game, since the player has the freedom to look anywhere anytime. Thus, many game designers create linear and restrictive experiences. In this paper, we develop a dynamic story and guide the player's attention to the specific game elements. To this end, we propose a novel directed teleport function to show points of interest to the player. We evaluate the effect of the proposed function by conducting a user study among two groups of 20 participants: one group can use only the common teleport function, whereas the other group can additionally use the directed teleport function. The results of our study indicate that the directed teleport function is very effective, has a positive effect on the orientation, and is very easy to use. In particular, the directed teleport function not only helps the player to navigate through the virtual world but also reveals Interesting points.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent advances in Virtual Reality (VR) technology have contributed to the development of immersive applications for training and simulation. VR serious games can be utilized to support and supplement traditional training. 
In VR-based training environments, the player can make mistakes without serious consequences to gather experiences which help them to make better decisions in the future. However, it is difficult to guide the player's attention throughout the VR game, since the player has the freedom to look anywhere anytime. Thus, many game designers create linear and restrictive experiences. In this paper, we develop a dynamic story and guide the player's attention to the specific game elements. To this end, we propose a novel directed teleport function to show points of interest to the player. We evaluate the effect of the proposed function by conducting a user study among two groups of 20 participants: one group can use only the common teleport function, whereas the other group can additionally use the directed teleport function. The results of our study indicate that the directed teleport function is very effective, has a positive effect on the orientation, and is very easy to use. In particular, the directed teleport function not only helps the player to navigate through the virtual world but also reveals Interesting points.", "fno": "08864599", "keywords": [ "Computer Based Training", "Serious Games Computing", "Teleportation", "Virtual Reality", "Directed Teleport Function", "Immersive Training", "Virtual Reality Technology", "VR Serious Games", "VR Based Training", "Immersive Virtual Reality", "Games", "Training", "Teleportation", "Virtual Environments", "Automobiles", "Law Enforcement", "Immersive Virtual Reality", "Serious Game", "3 D Storytelling", "Virtual Worlds", "Training", "Simulations", "Teleportation", "Full Body Motion Reconstruction", "HTC Vive" ], "authors": [ { "affiliation": "Multimedia Communications Lab, Technische Universität Darmstadt, Darmstadt, Germany", "fullName": "Polona Caserman", "givenName": "Polona", "surname": "Caserman", "__typename": "ArticleAuthorType" }, { "affiliation": "Multimedia Communications Lab, Technische Universität Darmstadt, 
Darmstadt, Germany", "fullName": "Hongtao Zhang", "givenName": "Hongtao", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Polizei Hessen, Wiesbaden, Germany", "fullName": "Jonas Zinnäcker", "givenName": "Jonas", "surname": "Zinnäcker", "__typename": "ArticleAuthorType" }, { "affiliation": "Multimedia Communications Lab, Technische Universität Darmstadt, Darmstadt, Germany", "fullName": "Stefan Göbel", "givenName": "Stefan", "surname": "Göbel", "__typename": "ArticleAuthorType" } ], "idPrefix": "vs-games", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-09-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2019", "issn": null, "isbn": "978-1-7281-4540-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08864583", "articleId": "1e5ZtdBx66c", "__typename": "AdjacentArticleType" }, "next": { "fno": "08864579", "articleId": "1e5ZqUGv6Cc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892335", "title": "Designing intentional impossible spaces in virtual reality narratives: A case study", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892335/12OmNApcu9b", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446620", "title": "Spatial Updating and Simulator Sickness During Steering and Jumping in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446620/13bd1fKQxs4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tg/2019/05/08642365", "title": "VR Exploration Assistance through Automatic Occlusion Removal", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642365/17PYEj2mz9Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a238", "title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a501", "title": "Exploring Three-Dimensional Locomotion Techniques in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a501/1J7WrBbMYEg", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/01/08762207", "title": "Locomotion in Place in Virtual Reality: A Comparative Evaluation of Joystick, Teleport, and Leaning", "doi": null, "abstractUrl": "/journal/tg/2021/01/08762207/1bIeI0S82Aw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798155", "title": "Grasping objects in immersive Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798155/1cJ0SxJIrrW", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference 
on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090536", "title": "Elastic-Move: Passive Haptic Device with Force Feedback for Virtual Reality Locomotion", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090536/1jIxqFQXvSE", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090614", "title": "Exploring VR Training for First Responders", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090614/1jIxrG0gcCs", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a633", "title": "Immersive Authoring of Virtual Reality Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a633/1tnXNG6t1x6", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ13BSrOkU", "doi": "10.1109/VR.2019.8797901", "title": "DepthText: Leveraging Head Movements towards the Depth Dimension for Hands-free Text Entry in Mobile Virtual Reality Systems", "normalizedTitle": "DepthText: Leveraging Head Movements towards the Depth Dimension for Hands-free Text Entry in Mobile Virtual Reality Systems", "abstract": "Text entry is a common activity in virtual reality (VR) systems. However, there is a limited number of approaches available for mobile VR systems, where it might be inconvenient for users to carry an input device. We propose a novel hands-free text entry technique we call DepthText which leverages the acceleration sensing abilities of built-in IMU sensors of mobile VR systems. Users are able to enter text by moving their head forward. The results of a 5-day study indicate that users can achieve an average of 10.76 words per minute (wpm) on the last day with low errors. This performance is comparable to the dwell-based technique which is the most common way of entering text that is hands-free. One advantage of DepthText over the dwell-based technique is that users can have more control of the pace of selecting characters, rather than being pushed by a pre-set dwell time.", "abstracts": [ { "abstractType": "Regular", "content": "Text entry is a common activity in virtual reality (VR) systems. However, there is a limited number of approaches available for mobile VR systems, where it might be inconvenient for users to carry an input device. We propose a novel hands-free text entry technique we call DepthText which leverages the acceleration sensing abilities of built-in IMU sensors of mobile VR systems. Users are able to enter text by moving their head forward. 
The results of a 5-day study indicate that users can achieve an average of 10.76 words per minute (wpm) on the last day with low errors. This performance is comparable to the dwell-based technique which is the most common way of entering text that is hands-free. One advantage of DepthText over the dwell-based technique is that users can have more control of the pace of selecting characters, rather than being pushed by a pre-set dwell time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Text entry is a common activity in virtual reality (VR) systems. However, there is a limited number of approaches available for mobile VR systems, where it might be inconvenient for users to carry an input device. We propose a novel hands-free text entry technique we call DepthText which leverages the acceleration sensing abilities of built-in IMU sensors of mobile VR systems. Users are able to enter text by moving their head forward. The results of a 5-day study indicate that users can achieve an average of 10.76 words per minute (wpm) on the last day with low errors. This performance is comparable to the dwell-based technique which is the most common way of entering text that is hands-free. 
One advantage of DepthText over the dwell-based technique is that users can have more control of the pace of selecting characters, rather than being pushed by a pre-set dwell time.", "fno": "08797901", "keywords": [ "Inertial Systems", "Mobile Computing", "Text Analysis", "Virtual Reality", "Depth Text", "Head Movements", "Depth Dimension", "Mobile Virtual Reality Systems", "Mobile VR Systems", "Hands Free Text Entry Technique", "Acceleration Sensing Abilities", "Dwell Based Technique", "Built In IMU Sensors", "Acceleration", "Sensors", "Virtual Reality", "Input Devices", "Error Analysis", "Mobile Handsets", "Training" ], "authors": [ { "affiliation": "Xi'an Jiaotong-Liverpool University", "fullName": "Xueshi Lu", "givenName": "Xueshi", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi'an Jiaotong-Liverpool University", "fullName": "Difeng Yu", "givenName": "Difeng", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi'an Jiaotong-Liverpool University", "fullName": "Hai-Ning Liang", "givenName": "Hai-Ning", "surname": "Liang", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi'an Jiaotong-Liverpool University", "fullName": "Xiyu Feng", "givenName": "Xiyu", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi'an Jiaotong-Liverpool University", "fullName": "Wenge Xu", "givenName": "Wenge", "surname": "Xu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1060-1061", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798164", "articleId": "1cJ1e7ULbji", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798017", "articleId": "1cJ0YUTkHao", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446059", "title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08456570", "title": "PizzaText: Text Entry for Virtual Reality Systems Using Dual Thumbsticks", "doi": null, "abstractUrl": "/journal/tg/2018/11/08456570/14M3DYGRu3o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642443", "title": "RingText: Dwell-free and hands-free Text Entry for Mobile Head-Mounted Displays using Head Motions", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642443/17PYEjrlgBP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a968", "title": "Asymmetric interfaces with stylus and gesture for VR sketching", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a968/1CJdzTRQ9s4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a074", "title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6", 
"parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a131", "title": "Evaluation of Text Selection Techniques in Virtual Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a131/1JrRdnGe43C", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049695", "title": "CrowbarLimbs: A Fatigue-Reducing Virtual Reality Text Entry Metaphor", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049695/1KYowtn3pok", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/07/08723303", "title": "Errata to &#x201C;RingText: Dwell-Free and Hands-Free Text Entry for Mobile Head-Mounted Displays Using Head Motions&#x201D; [May 19 1991-2001]", "doi": null, "abstractUrl": "/journal/tg/2019/07/08723303/1aqzjJfQFCU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797916", "title": "DepthMove: Hands-free Interaction in Virtual Reality Using Head Motions in the Depth Dimension", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797916/1cJ0K0zJcv6", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar/2020/8508/0/850800a344", "title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0cM", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNBpEeZr", "doi": "10.1109/ISMAR-Adjunct.2017.18", "title": "Tutorial 3: SOFA, an Open-Source Framework for Physics Simulation in Augmented Reality", "normalizedTitle": "Tutorial 3: SOFA, an Open-Source Framework for Physics Simulation in Augmented Reality", "abstract": "This tutorial will shortly review the background in physics simulation and introduce the main principles of the SOFA framework. Examples of SOFA simulations will be presented. The afternoon will be more \"hands-on\" oriented starting with an interactive user tutorial, followed by a developer tutorial. Further to this tutorial, you should have all the basis to build your own physics simulation. The SOFA tutorial at ISMAR is the opportunity to discover an open-source physics engine and include physics in your AR applications. From a starting up to a developer level, this tutorial focus on the wide topic of physics simulation. Not only will the physics of SOFA add realism into your AR application, but it might allow you to address new research and industrial challenges. Moreover, the flexible architecture of the software and the large international open-source community will make your start with SOFA easier. Attend the tutorial and join the community! This tutorial is done once a year. A global publication about SOFA has been published in 2012: Multi-Model Framework for Interactive Physical Simulation. All publications based on SOFA can be found here: https://www.sofa-framework.org/applications/publications/.", "abstracts": [ { "abstractType": "Regular", "content": "This tutorial will shortly review the background in physics simulation and introduce the main principles of the SOFA framework. 
Examples of SOFA simulations will be presented. The afternoon will be more \"hands-on\" oriented starting with an interactive user tutorial, followed by a developer tutorial. Further to this tutorial, you should have all the basis to build your own physics simulation. The SOFA tutorial at ISMAR is the opportunity to discover an open-source physics engine and include physics in your AR applications. From a starting up to a developer level, this tutorial focus on the wide topic of physics simulation. Not only will the physics of SOFA add realism into your AR application, but it might allow you to address new research and industrial challenges. Moreover, the flexible architecture of the software and the large international open-source community will make your start with SOFA easier. Attend the tutorial and join the community! This tutorial is done once a year. A global publication about SOFA has been published in 2012: Multi-Model Framework for Interactive Physical Simulation. All publications based on SOFA can be found here: https://www.sofa-framework.org/applications/publications/.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This tutorial will shortly review the background in physics simulation and introduce the main principles of the SOFA framework. Examples of SOFA simulations will be presented. The afternoon will be more \"hands-on\" oriented starting with an interactive user tutorial, followed by a developer tutorial. Further to this tutorial, you should have all the basis to build your own physics simulation. The SOFA tutorial at ISMAR is the opportunity to discover an open-source physics engine and include physics in your AR applications. From a starting up to a developer level, this tutorial focus on the wide topic of physics simulation. Not only will the physics of SOFA add realism into your AR application, but it might allow you to address new research and industrial challenges. 
Moreover, the flexible architecture of the software and the large international open-source community will make your start with SOFA easier. Attend the tutorial and join the community! This tutorial is done once a year. A global publication about SOFA has been published in 2012: Multi-Model Framework for Interactive Physical Simulation. All publications based on SOFA can be found here: https://www.sofa-framework.org/applications/publications/.", "fno": "6327z032", "keywords": [ "Augmented Reality", "Physics Computing", "Public Domain Software", "SOFA", "Open Source Framework", "Physics Simulation", "Augmented Reality" ], "authors": [], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "xxxii-xxxii", "year": "2017", "issn": null, "isbn": "978-0-7695-6327-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6327z031", "articleId": "12OmNyRg494", "__typename": "AdjacentArticleType" }, "next": { "fno": "6327a001", "articleId": "12OmNBQ2VWK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2015/7660/0/7660z030", "title": "Tutorial 1: Global-scale Localization in Outdoor Environments for AR", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660z030/12OmNAS9zN0", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icre/1998/8356/0/00667809", "title": "A Case for Priority: Introduction to the Mini-Tutorial", "doi": null, "abstractUrl": "/proceedings-article/icre/1998/00667809/12OmNrHSD2R", "parentPublication": { "id": "proceedings/icre/1998/8356/0", "title": "Proceedings of 
IEEE International Symposium on Requirements Engineering: RE '98", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2008/3259/0/3259a037", "title": "Introduction of Physics Simulation in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2008/3259a037/12OmNvB9Fyb", "parentPublication": { "id": "proceedings/isuvr/2008/3259/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892358", "title": "Augmented reality: Principles and practice", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892358/12OmNx57HS4", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802118", "title": "Quantitative and qualitative methods for human-subject experiments in Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802118/12OmNxRWI8b", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836436", "title": "Augmented Reality – Principles and Practice Tutorial", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836436/12OmNxVV5Xg", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327z031", "title": "Tutorial 2: Developing Virtual Reality applications with the Visualization Toolkit (VTK)", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2017/6327z031/12OmNyRg494", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2011/4420/0/4420b025", "title": "On Visual Artifacts of Physics Simulation in Augmented Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2011/4420b025/12OmNzIUfTU", "parentPublication": { "id": "proceedings/isuvr/2011/4420/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2006/2579/0/04019613", "title": "Testing Tools and Techniques: A Mini-Tutorial on Evaluation Methods for ASE", "doi": null, "abstractUrl": "/proceedings-article/ase/2006/04019613/13Jkrbod1qW", "parentPublication": { "id": "proceedings/ase/2006/2579/0", "title": "21st IEEE/ACM International Conference on Automated Software Engineering (ASE'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090480", "title": "PhyAR: Determining the Utility of Augmented Reality for Physics Education in the Classroom", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090480/1jIxvICmsO4", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwMXnv0", "title": "2014 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNxRWI8b", "doi": "10.1109/VR.2014.6802118", "title": "Quantitative and qualitative methods for human-subject experiments in Virtual and Augmented Reality", "normalizedTitle": "Quantitative and qualitative methods for human-subject experiments in Virtual and Augmented Reality", "abstract": "This tutorial is for researchers and engineers, working in the field of Virtual and Augmented Reality, who wish to conduct user-based experiments and/or evaluations for assessing usability. We propose a full-day tutorial presenting both quantitative and qualitative approaches to conducting human-subject experiments. It will cover (1) the basic principles of experimental design and analysis, with an emphasis on human-subject experiments in AR (Swan), and (2) qualitative studies (e.g., formative evaluation methods) for assessing and improving AR user interfaces and user interaction along with lessons learned from conducting many user-based studies (Gabbard). Swan, Gabbard, and other co-presenters have taught pre-cursor versions of this tutorial 11 previous times at IEEE Virtual Reality, IEEE Visualization, and ISMAR. This tutorial was most recently given at ISMAR 2012, where we included updated examples from our research and further expanded upon qualitative approaches for assessing usability and lessons learned from conducting studies. 
We both have current, active AR human-subject research projects, and if this tutorial is accepted to be presented at VR 2014, we will discuss some of these projects as case studies.", "abstracts": [ { "abstractType": "Regular", "content": "This tutorial is for researchers and engineers, working in the field of Virtual and Augmented Reality, who wish to conduct user-based experiments and/or evaluations for assessing usability. We propose a full-day tutorial presenting both quantitative and qualitative approaches to conducting human-subject experiments. It will cover (1) the basic principles of experimental design and analysis, with an emphasis on human-subject experiments in AR (Swan), and (2) qualitative studies (e.g., formative evaluation methods) for assessing and improving AR user interfaces and user interaction along with lessons learned from conducting many user-based studies (Gabbard). Swan, Gabbard, and other co-presenters have taught pre-cursor versions of this tutorial 11 previous times at IEEE Virtual Reality, IEEE Visualization, and ISMAR. This tutorial was most recently given at ISMAR 2012, where we included updated examples from our research and further expanded upon qualitative approaches for assessing usability and lessons learned from conducting studies. We both have current, active AR human-subject research projects, and if this tutorial is accepted to be presented at VR 2014, we will discuss some of these projects as case studies.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This tutorial is for researchers and engineers, working in the field of Virtual and Augmented Reality, who wish to conduct user-based experiments and/or evaluations for assessing usability. We propose a full-day tutorial presenting both quantitative and qualitative approaches to conducting human-subject experiments. 
It will cover (1) the basic principles of experimental design and analysis, with an emphasis on human-subject experiments in AR (Swan), and (2) qualitative studies (e.g., formative evaluation methods) for assessing and improving AR user interfaces and user interaction along with lessons learned from conducting many user-based studies (Gabbard). Swan, Gabbard, and other co-presenters have taught pre-cursor versions of this tutorial 11 previous times at IEEE Virtual Reality, IEEE Visualization, and ISMAR. This tutorial was most recently given at ISMAR 2012, where we included updated examples from our research and further expanded upon qualitative approaches for assessing usability and lessons learned from conducting studies. We both have current, active AR human-subject research projects, and if this tutorial is accepted to be presented at VR 2014, we will discuss some of these projects as case studies.", "fno": "06802118", "keywords": [ "Tutorials", "Usability", "Augmented Reality", "User Interfaces", "Visualization", "Educational Institutions" ], "authors": [ { "affiliation": "Mississippi State University, Organizer", "fullName": "J. Edward Swan", "givenName": "J.", "surname": "Edward Swan", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech", "fullName": "Joseph L. 
Gabbard", "givenName": "Joseph L.", "surname": "Gabbard", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2014-03-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2014", "issn": null, "isbn": "978-1-4799-2871-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06802117", "articleId": "12OmNz61dxb", "__typename": "AdjacentArticleType" }, "next": { "fno": "06802119", "articleId": "12OmNylsZFt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar-adjunct/2017/6327/0/6327z032", "title": "Tutorial 3: SOFA, an Open-Source Framework for Physics Simulation in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327z032/12OmNBpEeZr", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2012/1247/0/06180945", "title": "VR 2012 tutorial: Quantitative and qualitative methods for human-subject experiments in Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2012/06180945/12OmNwp74wX", "parentPublication": { "id": "proceedings/vr/2012/1247/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948510", "title": "A ‘Look Into’ Medical augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948510/12OmNx2zjyh", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892358", "title": "Augmented reality: Principles and practice", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892358/12OmNx57HS4", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836436", "title": "Augmented Reality – Principles and Practice Tutorial", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836436/12OmNxVV5Xg", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ic/2013/06/mic2013060066", "title": "Augmented Reality Interfaces", "doi": null, "abstractUrl": "/magazine/ic/2013/06/mic2013060066/13rRUIJcWhZ", "parentPublication": { "id": "mags/ic", "title": "IEEE Internet Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864548", "title": "The Usability of the Microsoft HoloLens for an Augmented Reality Game to Teach Elementary School Children", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864548/1e5ZpUVkjVS", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090479", "title": "Impact of AR Display Context Switching and Focal Distance Switching on Human Performance: Replication on an AR Haploscope", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090479/1jIxlrWEUmc", "parentPublication": { "id": 
"proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212860", "title": "User Engagement for Collaborative Learning on a Mobile and Desktop Augmented Reality Application", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212860/1nHRTRhZdRK", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a438", "title": "Evaluating Mixed and Augmented Reality: A Systematic Literature Review (2009-2019)", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a438/1pysxe8SeyY", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1RtAFjYk", "doi": "10.1109/ISMAR-Adjunct.2018.00081", "title": "Enterprise AR Functional Requirements Workshop", "normalizedTitle": "Enterprise AR Functional Requirements Workshop", "abstract": "This workshop focuses on the functional requirements for enterprise AR components. Enterprise AR customers have requirements that differ substantially from those of consumers. Having functional requirements directly benefits enterprise customers: products and services will have interoperability, customer RFPs will be easier to create and respond to, and research as well as development communities will have more clear understanding of the requirements of enterprise AR buyers. Those ISMAR attendees conducting research about enterprise AR and providers of AR components and solutions will have clear definitions of customer needs. This will lead to the highest value research and greater enterprise AR project success which can then be used to influence research agendas, development roadmaps and future products. A preliminary set of enterprise AR requirements was created in 2016 through a collaboration between UI LABS (DMDII) and the AREA and delivered through a project led by Lockheed Martin, Caterpillar and Procter &amp; Gamble. In 2017 and 2018, through several additional cycles of input by stakeholders, these requirements have since been refined. This workshop will shed new light on the requirements' current status, and provide valuable inputs to the further refinement and applications of the enterprise AR requirement documents.", "abstracts": [ { "abstractType": "Regular", "content": "This workshop focuses on the functional requirements for enterprise AR components. 
Enterprise AR customers have requirements that differ substantially from those of consumers. Having functional requirements directly benefits enterprise customers: products and services will have interoperability, customer RFPs will be easier to create and respond to, and research as well as development communities will have more clear understanding of the requirements of enterprise AR buyers. Those ISMAR attendees conducting research about enterprise AR and providers of AR components and solutions will have clear definitions of customer needs. This will lead to the highest value research and greater enterprise AR project success which can then be used to influence research agendas, development roadmaps and future products. A preliminary set of enterprise AR requirements was created in 2016 through a collaboration between UI LABS (DMDII) and the AREA and delivered through a project led by Lockheed Martin, Caterpillar and Procter &amp; Gamble. In 2017 and 2018, through several additional cycles of input by stakeholders, these requirements have since been refined. This workshop will shed new light on the requirements' current status, and provide valuable inputs to the further refinement and applications of the enterprise AR requirement documents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This workshop focuses on the functional requirements for enterprise AR components. Enterprise AR customers have requirements that differ substantially from those of consumers. Having functional requirements directly benefits enterprise customers: products and services will have interoperability, customer RFPs will be easier to create and respond to, and research as well as development communities will have more clear understanding of the requirements of enterprise AR buyers. Those ISMAR attendees conducting research about enterprise AR and providers of AR components and solutions will have clear definitions of customer needs. 
This will lead to the highest value research and greater enterprise AR project success which can then be used to influence research agendas, development roadmaps and future products. A preliminary set of enterprise AR requirements was created in 2016 through a collaboration between UI LABS (DMDII) and the AREA and delivered through a project led by Lockheed Martin, Caterpillar and Procter & Gamble. In 2017 and 2018, through several additional cycles of input by stakeholders, these requirements have since been refined. This workshop will shed new light on the requirements' current status, and provide valuable inputs to the further refinement and applications of the enterprise AR requirement documents.", "fno": "08699320", "keywords": [ "Augmented Reality", "Business Data Processing", "Customer Relationship Management", "Open Systems", "Systems Analysis", "Benefits Enterprise Customers", "Enterprise AR Buyers", "Enterprise AR Requirement Documents", "Functional Requirements", "Enterprise AR Customers", "Customer RFP", "Augmented Reality", "Conferences", "Augmented Reality", "Interoperability", "Collaboration", "Stakeholders" ], "authors": [ { "affiliation": null, "fullName": "Michael Rygol", "givenName": "Michael", "surname": "Rygol", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Christine Perey", "givenName": "Christine", "surname": "Perey", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "264-264", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699246", "articleId": "19F1OIk174Q", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699190", "articleId": "19F1MykMwmI", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/svr/2012/4725/0/4725a116", "title": "From VR to AR: Adding AR Functionality to an Existing VR Software Framework", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a116/12OmNAYoKsE", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a268", "title": "Workshop on augmented reality for good", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a268/12OmNB8Cjak", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948520", "title": "Hands free — Exploring AR glasses and their peculiarities", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948520/12OmNC17hVV", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a251", "title": "Workshop on enterprise AR adoption obstacles", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a251/12OmNqI04Zv", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a252", "title": "Workshop on Standards for Mixed and Augmented Reality Summary", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2017/6327a252/12OmNwnYG1O", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948517", "title": "Collaboration in mediated and augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948517/12OmNy6HQPU", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a253", "title": "Workshop on VR and AR meet creative industries", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a253/12OmNylKASp", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a326", "title": "Workshop on highly diverse cameras and displays for mixed and augmented reality (HDCD4MAR)", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a326/12OmNzlD9rq", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699182", "title": "International Workshop on Comfort Intelligence with AR for Autonomous Vehicle 2018", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699182/19F1R26pH0Y", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": 
"2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699256", "title": "3rd Virtual and Augmented Reality for Good (VAR4Good) Workshop", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699256/19F1VrcNC7u", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1TV6LZJu", "doi": "10.1109/ISMAR-Adjunct.2018.00093", "title": "Workshop on Creativity in Designing with &#x0026; for Mixed Reality", "normalizedTitle": "Workshop on Creativity in Designing with & for Mixed Reality", "abstract": "Although developments in devices and software are maturing towards novel Mixed Reality systems, there is too little connection to the design field. Especially if AR is combined with other &#x201C;smart&#x201D; technologies (internet of things), perspectives shift from merely technical characteristics and quantifiable human factors to more complex UX scenarios. Although there are other special interest groups/conferences that in part cover this theme (CHI, IUI, UIST), we would think that ISMAR is a better venue in connecting the graphics/tracking community with design researchers. We also would like to address the lack of software engineering skills with design students/professionals. How can we bridge these disciplines and silos of innovation? This workshop invites both industrial and academic participants to contribute to this debate, first of all by submitting extended abstracts that cover case studies, best practices and challenges in design for/with AR. To cater for a design debate, we strongly encourage submissions of annotated artworks/3D scenes/pictures/floorplans as well as more traditional papers. Position papers should be 2-6 pages long, submitted in PDF format and formatted using the ISMAR 2018 paper template available from https://ismar2018.org/guidelines_submission/index.html. Papers will be peer-reviewed and after acceptance be published in the adjunct proceedings or ISMAR. 
During the workshop papers will be presented/demonstrations as short presentations. Submissions should not be anonymized and the author names and affiliations should be displayed on the first page. At least one author of each accepted paper must attend the workshop and register for at least one day of the conference. All accepted papers workshop's papers will be published in the ISMAR 2018 Adjunct Proceedings.", "abstracts": [ { "abstractType": "Regular", "content": "Although developments in devices and software are maturing towards novel Mixed Reality systems, there is too little connection to the design field. Especially if AR is combined with other &#x201C;smart&#x201D; technologies (internet of things), perspectives shift from merely technical characteristics and quantifiable human factors to more complex UX scenarios. Although there are other special interest groups/conferences that in part cover this theme (CHI, IUI, UIST), we would think that ISMAR is a better venue in connecting the graphics/tracking community with design researchers. We also would like to address the lack of software engineering skills with design students/professionals. How can we bridge these disciplines and silos of innovation? This workshop invites both industrial and academic participants to contribute to this debate, first of all by submitting extended abstracts that cover case studies, best practices and challenges in design for/with AR. To cater for a design debate, we strongly encourage submissions of annotated artworks/3D scenes/pictures/floorplans as well as more traditional papers. Position papers should be 2-6 pages long, submitted in PDF format and formatted using the ISMAR 2018 paper template available from https://ismar2018.org/guidelines_submission/index.html. Papers will be peer-reviewed and after acceptance be published in the adjunct proceedings or ISMAR. During the workshop papers will be presented/demonstrations as short presentations. 
Submissions should not be anonymized and the author names and affiliations should be displayed on the first page. At least one author of each accepted paper must attend the workshop and register for at least one day of the conference. All accepted papers workshop's papers will be published in the ISMAR 2018 Adjunct Proceedings.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Although developments in devices and software are maturing towards novel Mixed Reality systems, there is too little connection to the design field. Especially if AR is combined with other “smart” technologies (internet of things), perspectives shift from merely technical characteristics and quantifiable human factors to more complex UX scenarios. Although there are other special interest groups/conferences that in part cover this theme (CHI, IUI, UIST), we would think that ISMAR is a better venue in connecting the graphics/tracking community with design researchers. We also would like to address the lack of software engineering skills with design students/professionals. How can we bridge these disciplines and silos of innovation? This workshop invites both industrial and academic participants to contribute to this debate, first of all by submitting extended abstracts that cover case studies, best practices and challenges in design for/with AR. To cater for a design debate, we strongly encourage submissions of annotated artworks/3D scenes/pictures/floorplans as well as more traditional papers. Position papers should be 2-6 pages long, submitted in PDF format and formatted using the ISMAR 2018 paper template available from https://ismar2018.org/guidelines_submission/index.html. Papers will be peer-reviewed and after acceptance be published in the adjunct proceedings or ISMAR. During the workshop papers will be presented/demonstrations as short presentations. Submissions should not be anonymized and the author names and affiliations should be displayed on the first page. 
At least one author of each accepted paper must attend the workshop and register for at least one day of the conference. All accepted papers workshop's papers will be published in the ISMAR 2018 Adjunct Proceedings.", "fno": "08699272", "keywords": [ "Augmented Reality", "Computer Graphics", "Human Factors", "Internet", "Internet Of Things", "Software Engineering", "User Interfaces", "Creativity", "Smart Technologies", "Design Researchers", "Software Engineering Skills", "Mixed Reality Systems", "Human Factors", "Tracking Community", "Augmented Reality", "Graphics Community", "Internet Of Things", "Conferences", "Augmented Reality", "Creativity", "Software", "Internet Of Things", "Human Factors" ], "authors": [ { "affiliation": null, "fullName": "Jouke Verlinden", "givenName": "Jouke", "surname": "Verlinden", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Doris Aschenbrenner", "givenName": "Doris", "surname": "Aschenbrenner", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Stephan Lukosh", "givenName": "Stephan", "surname": "Lukosh", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "314-314", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699202", "articleId": "19F1Q2ObZBe", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699175", "articleId": "19F1RFfm38A", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wetice/2010/4063/0/4063a203", "title": "COPS Workshop Final Report", "doi": null, "abstractUrl": "/proceedings-article/wetice/2010/4063a203/12OmNvJXeyS", "parentPublication": { "id": 
"proceedings/wetice/2010/4063/0", "title": "2010 19th IEEE International Workshops on Enabling Technologies: Infrastructures for Collaborative Enterprises", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a024", "title": "Measuring Perception of Realism in Mixed and Augmented Reality Summary", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a024/12OmNwErpst", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/se-cse/2013/6261/0/06615092", "title": "SE-CSE 2013: The 2013 International Workshop on Software Engineering for Computational Science and Engineering", "doi": null, "abstractUrl": "/proceedings-article/se-cse/2013/06615092/12OmNwbukbv", "parentPublication": { "id": "proceedings/se-cse/2013/6261/0", "title": "2013 5th International Workshop on Software Engineering for Computational Science and Engineering (SE-CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a087", "title": "Challenges and Applications of Urban Augmented Reality Summary", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a087/12OmNwtEECd", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836444", "title": "Interaction design principles of augmented reality focusing on the ageing population workshop summary", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836444/12OmNxy4MX2", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE 
International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/06/06807545", "title": "Guest Editor's Introduction: Special Section on the International Symposium on Mixed and Augmented Reality 2012", "doi": null, "abstractUrl": "/journal/tg/2014/06/06807545/13rRUwhHcQT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/05/07067526", "title": "Guest Editor's Introduction to the Special Section on the International Symposium on Mixed and Augmented Reality 2013", "doi": null, "abstractUrl": "/journal/tg/2015/05/07067526/13rRUwwaKta", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699320", "title": "Enterprise AR Functional Requirements Workshop", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699320/19F1RtAFjYk", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/spw/2020/9346/0/934600z017", "title": "Deep Learning and Security Workshop (DLS 2020)", "doi": null, "abstractUrl": "/proceedings-article/spw/2020/934600z017/1pF6Ypd2n04", "parentPublication": { "id": "proceedings/spw/2020/9346/0", "title": "2020 IEEE Security and Privacy Workshops (SPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09591492", "title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors", 
"doi": null, "abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pysxe8SeyY", "doi": "10.1109/ISMAR50242.2020.00069", "title": "Evaluating Mixed and Augmented Reality: A Systematic Literature Review (2009-2019)", "normalizedTitle": "Evaluating Mixed and Augmented Reality: A Systematic Literature Review (2009-2019)", "abstract": "We present a systematic review of 458 papers that report on evaluations in mixed and augmented reality (MR/AR) published in ISMAR, CHI, IEEE VR, and UIST over a span of 11 years (2009-2019). Our goal is to provide guidance for future evaluations of MR/AR approaches. To this end, we characterize publications by paper type (e.g., technique, design study), research topic (e.g., tracking, rendering), evaluation scenario (e.g., algorithm performance, user performance), cognitive aspects (e.g., perception, emotion), and the context in which evaluations were conducted (e.g., lab vs. in-the-wild). We found a strong coupling of types, topics, and scenarios. We observe two groups: (a) technology-centric performance evaluations of algorithms that focus on improving tracking, displays, reconstruction, rendering, and calibration, and (b) human-centric studies that analyze implications of applications and design, human factors on perception, usability, decision making, emotion, and attention. Amongst the 458 papers, we identified 248 user studies that involved 5,761 participants in total, of whom only 1,619 were identified as female. We identified 43 data collection methods used to analyze 10 cognitive aspects. We found nine objective methods, and eight methods that support qualitative analysis. A majority (216/248) of user studies are conducted in a laboratory setting. Often (138/248), such studies involve participants in a static way. 
However, we also found a fair number (30/248) of in-the-wild studies that involve participants in a mobile fashion. We consider this paper to be relevant to academia and industry alike in presenting the state-of-the-art and guiding the steps to designing, conducting, and analyzing results of evaluations in MR/AR.", "abstracts": [ { "abstractType": "Regular", "content": "We present a systematic review of 458 papers that report on evaluations in mixed and augmented reality (MR/AR) published in ISMAR, CHI, IEEE VR, and UIST over a span of 11 years (2009-2019). Our goal is to provide guidance for future evaluations of MR/AR approaches. To this end, we characterize publications by paper type (e.g., technique, design study), research topic (e.g., tracking, rendering), evaluation scenario (e.g., algorithm performance, user performance), cognitive aspects (e.g., perception, emotion), and the context in which evaluations were conducted (e.g., lab vs. in-the-wild). We found a strong coupling of types, topics, and scenarios. We observe two groups: (a) technology-centric performance evaluations of algorithms that focus on improving tracking, displays, reconstruction, rendering, and calibration, and (b) human-centric studies that analyze implications of applications and design, human factors on perception, usability, decision making, emotion, and attention. Amongst the 458 papers, we identified 248 user studies that involved 5,761 participants in total, of whom only 1,619 were identified as female. We identified 43 data collection methods used to analyze 10 cognitive aspects. We found nine objective methods, and eight methods that support qualitative analysis. A majority (216/248) of user studies are conducted in a laboratory setting. Often (138/248), such studies involve participants in a static way. However, we also found a fair number (30/248) of in-the-wild studies that involve participants in a mobile fashion. 
We consider this paper to be relevant to academia and industry alike in presenting the state-of-the-art and guiding the steps to designing, conducting, and analyzing results of evaluations in MR/AR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a systematic review of 458 papers that report on evaluations in mixed and augmented reality (MR/AR) published in ISMAR, CHI, IEEE VR, and UIST over a span of 11 years (2009-2019). Our goal is to provide guidance for future evaluations of MR/AR approaches. To this end, we characterize publications by paper type (e.g., technique, design study), research topic (e.g., tracking, rendering), evaluation scenario (e.g., algorithm performance, user performance), cognitive aspects (e.g., perception, emotion), and the context in which evaluations were conducted (e.g., lab vs. in-the-wild). We found a strong coupling of types, topics, and scenarios. We observe two groups: (a) technology-centric performance evaluations of algorithms that focus on improving tracking, displays, reconstruction, rendering, and calibration, and (b) human-centric studies that analyze implications of applications and design, human factors on perception, usability, decision making, emotion, and attention. Amongst the 458 papers, we identified 248 user studies that involved 5,761 participants in total, of whom only 1,619 were identified as female. We identified 43 data collection methods used to analyze 10 cognitive aspects. We found nine objective methods, and eight methods that support qualitative analysis. A majority (216/248) of user studies are conducted in a laboratory setting. Often (138/248), such studies involve participants in a static way. However, we also found a fair number (30/248) of in-the-wild studies that involve participants in a mobile fashion. 
We consider this paper to be relevant to academia and industry alike in presenting the state-of-the-art and guiding the steps to designing, conducting, and analyzing results of evaluations in MR/AR.", "fno": "850800a438", "keywords": [ "Augmented Reality", "Cognition", "Decision Making", "Human Computer Interaction", "Human Factors", "Internet", "User Interfaces", "Augmented Reality", "Systematic Literature Review", "IEEE VR", "Future Evaluations", "Evaluation Scenario", "User Performance", "Technology Centric Performance Evaluations", "Human Centric Studies", "User Studies", "Data Collection Methods", "Cognitive Aspects", "In The Wild Studies", "Systematics", "Benchmark Testing", "User Interfaces", "Rendering Computer Graphics", "Calibration", "Usability", "Augmented Reality", "Mixed And Augmented Reality", "Evaluation", "Systematic Literature Review", "I 3 7 Computing Methodologies", "Three Dimensional Graphics And Realism", "A 1 General Literature", "Computer Graphics", "Introductory And Survey" ], "authors": [ { "affiliation": "University of Stuttgart", "fullName": "Leonel Merino", "givenName": "Leonel", "surname": "Merino", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Stuttgart", "fullName": "Magdalena Schwarzl", "givenName": "Magdalena", "surname": "Schwarzl", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Konstanz", "fullName": "Matthias Kraus", "givenName": "Matthias", "surname": "Kraus", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Stuttgart", "fullName": "Michael Sedlmair", "givenName": "Michael", "surname": "Sedlmair", "__typename": "ArticleAuthorType" }, { "affiliation": "Graz University of Technology", "fullName": "Dieter Schmalstieg", "givenName": "Dieter", "surname": "Schmalstieg", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Stuttgart", "fullName": "Daniel Weiskopf", "givenName": "Daniel", "surname": "Weiskopf", "__typename": "ArticleAuthorType" } ], 
"idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "438-451", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "850800a425", "articleId": "1pysvtrmdfa", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a452", "articleId": "1pysvNRUnD2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/apsec/2017/3681/0/3681a041", "title": "Text-Mining Techniques and Tools for Systematic Literature Reviews: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/apsec/2017/3681a041/12OmNBlofPT", "parentPublication": { "id": "proceedings/apsec/2017/3681/0", "title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836457", "title": "A Systematic Review of Usability Studies in Augmented Reality between 2005 and 2014", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836457/12OmNvs4vsh", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eecs/2017/2085/0/2085a198", "title": "Educational Process Mining: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/eecs/2017/2085a198/12OmNyKrHak", "parentPublication": { "id": "proceedings/eecs/2017/2085/0", "title": "2017 European Conference on Electrical Engineering and Computer Science (EECS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/seaa/2013/5091/0/5091a009", "title": "Kanban in software development: A systematic literature review", "doi": null, "abstractUrl": "/proceedings-article/seaa/2013/5091a009/12OmNyyeWuL", "parentPublication": { "id": "proceedings/seaa/2013/5091/0", "title": "2013 39th EUROMICRO Conference on Software Engineering and Advanced Applications (SEAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edocw/2017/1568/0/1568a041", "title": "Defining Enterprise Architecture: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/edocw/2017/1568a041/12OmNzxPTMA", "parentPublication": { "id": "proceedings/edocw/2017/1568/0", "title": "2017 IEEE 21st International Enterprise Distributed Object Computing Workshop (EDOCW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2017/09/07779159", "title": "Reporting Usability Defects: A Systematic Literature Review", "doi": null, "abstractUrl": "/journal/ts/2017/09/07779159/13rRUIIVlel", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2018/1174/0/08659306", "title": "Systematic Literature Review of Students&#x2019; Affective Responses to Active Learning: Overview of Results", "doi": null, "abstractUrl": "/proceedings-article/fie/2018/08659306/18j9e3hUj7i", "parentPublication": { "id": "proceedings/fie/2018/1174/0", "title": "2018 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/se4rai/2022/9319/0/931900a001", "title": "Operationalizing Machine Learning Models - A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/se4rai/2022/931900a001/1ED20Ql4AIo", "parentPublication": { "id": "proceedings/se4rai/2022/9319/0", "title": "2022 
IEEE/ACM 1st International Workshop on Software Engineering for Responsible Artificial Intelligence (SE4RAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2020/9231/0/923100a067", "title": "Virtual Reality on Product Usability Testing: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/svr/2020/923100a067/1oZBCPBt7AA", "parentPublication": { "id": "proceedings/svr/2020/9231/0", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/conisoft/2021/4361/0/436100a104", "title": "Identification of Test Cases Duplication: Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/conisoft/2021/436100a104/1zHIqWqSCC4", "parentPublication": { "id": "proceedings/conisoft/2021/4361/0", "title": "2021 9th International Conference in Software Engineering Research and Innovation (CONISOFT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mWh", "title": "2013 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNyugz43", "doi": "10.1109/VR.2013.6549443", "title": "Creating 3D Projection on tangible object", "normalizedTitle": "Creating 3D Projection on tangible object", "abstract": "Summary form only given. Our work explores the possibilities of using 3D projection on physical objects - a multi-projector set-up, which we aim to use for exhibition, teaching, design and early prototyping purposes. We also aim to create more immersive and engaging multimedia presentations, and change from conventional 2D media to 3D media by projecting the visual content on a physical object. This video demonstration presents two examples of tangible 3D projections. In our first demonstration we used a sphere shaped object and projected a visualization of rotating Earth on it with five projectors. We have recently done a study [1] that focused on audience's perceptions of 3D projection. Results were encouraging, and we will continue our work with the subject. In another demonstration we used a human shaped model and projected various datasets on it, including muscles, bones and digestive system. This setup contains two projectors, one in the back and the other one in the front of the body. 3D world contains 3D model of the human body and two camera views. In the future we will focus on enabling functional interactions to make projections more useful and interesting e.g. for teaching purposes. Interactions and automatic projector calibrations are the next steps to be implemented and examined in our project.", "abstracts": [ { "abstractType": "Regular", "content": "Summary form only given. 
Our work explores the possibilities of using 3D projection on physical objects - a multi-projector set-up, which we aim to use for exhibition, teaching, design and early prototyping purposes. We also aim to create more immersive and engaging multimedia presentations, and change from conventional 2D media to 3D media by projecting the visual content on a physical object. This video demonstration presents two examples of tangible 3D projections. In our first demonstration we used a sphere shaped object and projected a visualization of rotating Earth on it with five projectors. We have recently done a study [1] that focused on audience's perceptions of 3D projection. Results were encouraging, and we will continue our work with the subject. In another demonstration we used a human shaped model and projected various datasets on it, including muscles, bones and digestive system. This setup contains two projectors, one in the back and the other one in the front of the body. 3D world contains 3D model of the human body and two camera views. In the future we will focus on enabling functional interactions to make projections more useful and interesting e.g. for teaching purposes. Interactions and automatic projector calibrations are the next steps to be implemented and examined in our project.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Summary form only given. Our work explores the possibilities of using 3D projection on physical objects - a multi-projector set-up, which we aim to use for exhibition, teaching, design and early prototyping purposes. We also aim to create more immersive and engaging multimedia presentations, and change from conventional 2D media to 3D media by projecting the visual content on a physical object. This video demonstration presents two examples of tangible 3D projections. In our first demonstration we used a sphere shaped object and projected a visualization of rotating Earth on it with five projectors. 
We have recently done a study [1] that focused on audience's perceptions of 3D projection. Results were encouraging, and we will continue our work with the subject. In another demonstration we used a human shaped model and projected various datasets on it, including muscles, bones and digestive system. This setup contains two projectors, one in the back and the other one in the front of the body. 3D world contains 3D model of the human body and two camera views. In the future we will focus on enabling functional interactions to make projections more useful and interesting e.g. for teaching purposes. Interactions and automatic projector calibrations are the next steps to be implemented and examined in our project.", "fno": "06549443", "keywords": [ "Three Dimensional Displays", "Media", "Visualization", "Educational Institutions", "Abstracts", "Multimedia Communication", "User Studies", "3 D Media Installation", "3 D Display", "User Experience" ], "authors": [ { "affiliation": null, "fullName": "Pekka Nisula", "givenName": "Pekka", "surname": "Nisula", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jussi Kangasoja", "givenName": "Jussi", "surname": "Kangasoja", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Minna Karukka", "givenName": "Minna", "surname": "Karukka", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2013", "issn": "1087-8270", "isbn": "978-1-4673-4795-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06549442", "articleId": "12OmNz5apEB", "__typename": "AdjacentArticleType" }, "next": { "fno": "06549444", "articleId": "12OmNyKJifs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { 
"id": "proceedings/ieee-vis/1999/5897/0/58970026", "title": "Multi-Projector Displays Using Camera-Based Registration", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970026/12OmNAfy7KW", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270480", "title": "Automatic texture mapping on real 3D model", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270480/12OmNBp52G8", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549425", "title": "Creating 3D Projection on tangible objects", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549425/12OmNqIzhfj", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034a815", "title": "Efficient Separation Between Projected Patterns for Multiple Projector 3D People Scanning", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a815/12OmNwcCINM", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200jaynes", "title": "Dynamic Shadow Removal from Front Projection Displays", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200jaynes/12OmNxG1yQD", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar/2011/2183/0/06092393", "title": "3D high dynamic range display system", "doi": null, "abstractUrl": "/proceedings-article/ismar/2011/06092393/12OmNzdGnwt", "parentPublication": { "id": "proceedings/ismar/2011/2183/0", "title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446433", "title": "A Calibration Method for Large-Scale Projection Based Floor Display System", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1368", "title": "Registration Techniques for Using Imperfect and Par tially Calibrated Devices in Planar Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007213", "title": "Geometric and Photometric Consistency in a Mixed Video and Galvanoscopic Scanning Laser Projection Mapping System", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007213/13rRUxcsYLX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a194", "title": "Frequency Shift Triangulation: A Robust Fringe Projection Technique for 3D Shape Acquisition in the Presence of Strong Interreflections", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a194/1ezRBg9CfUk", 
"parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJfoGqGJvW", "doi": "10.1109/VRW55335.2022.00291", "title": "Apparent Shape Manipulation by Light-Field Projection onto a Retroreflective Surface", "normalizedTitle": "Apparent Shape Manipulation by Light-Field Projection onto a Retroreflective Surface", "abstract": "For the correct optical presentation of metallic luster structural color, reproducing the changes in brilliance and color that accompany the movement of the viewpoint is essential. Light-field projection onto a retroreflective surface can optically present a texture depending on the viewpoint. Through this application, the apparent shape can potentially be manipulated depending on the viewpoint. This paper proposes an optical illusion that manipulates the apparent shape of a 3D object using light-field projection based on a perceptual normal map transformation.", "abstracts": [ { "abstractType": "Regular", "content": "For the correct optical presentation of metallic luster structural color, reproducing the changes in brilliance and color that accompany the movement of the viewpoint is essential. Light-field projection onto a retroreflective surface can optically present a texture depending on the viewpoint. Through this application, the apparent shape can potentially be manipulated depending on the viewpoint. 
This paper proposes an optical illusion that manipulates the apparent shape of a 3D object using light-field projection based on a perceptual normal map transformation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For the correct optical presentation of metallic luster structural color, reproducing the changes in brilliance and color that accompany the movement of the viewpoint is essential. Light-field projection onto a retroreflective surface can optically present a texture depending on the viewpoint. Through this application, the apparent shape can potentially be manipulated depending on the viewpoint. This paper proposes an optical illusion that manipulates the apparent shape of a 3D object using light-field projection based on a perceptual normal map transformation.", "fno": "840200a880", "keywords": [ "Image Reconstruction", "Image Texture", "Optical Projectors", "Three Dimensional Displays", "Retroreflective Surface", "Optical Illusion", "Manipulates", "Light Field Projection", "Apparent Shape Manipulation", "Correct Optical Presentation", "Metallic Luster Structural Color", "Brilliance", "Three Dimensional Displays", "Shape", "Conferences", "Color", "Virtual Reality", "User Interfaces", "Adaptive Optics", "Retroreflection", "Optical Illusion", "Virtual Reality", "Light Fields", "Optical Reflection", "Cameras", "Calibration" ], "authors": [ { "affiliation": "Wakayama University", "fullName": "Jion Kanaya", "givenName": "Jion", "surname": "Kanaya", "__typename": "ArticleAuthorType" }, { "affiliation": "Wakayama University", "fullName": "Toshiyuki Amano", "givenName": "Toshiyuki", "surname": "Amano", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "880-881", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": 
"ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "840200a878", "articleId": "1CJf8pMNC2k", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a882", "articleId": "1CJf8HlnhRK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2010/9343/0/05643561", "title": "Effects of a retroreflective screen on depth perception in a head-mounted projection display", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643561/12OmNB9bvby", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504727", "title": "Head mounted projection for enhanced gaze in social interactions", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504727/12OmNwpGgKa", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802105", "title": "Geometrically-correct projection-based texture mapping onto a cloth", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802105/12OmNzVXNZG", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07064856", "title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2022/9617/0/961700a635", "title": "PseudoJumpOn: Jumping onto Steps in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a635/1CJcyy0BMpq", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a756", "title": "Robust Tangible Projection Mapping with Multi-View Contour-Based Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a756/1CJeF1WYP1m", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2022/6908/0/690800a021", "title": "Perceptual Control of Food Taste with Projection Mapping", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2022/690800a021/1FWmZYvi4MM", "parentPublication": { "id": "proceedings/nicoint/2022/6908/0", "title": "2022 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049693", "title": "Shadowless Projection Mapping using Retrotransmissive Optics", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049693/1KYonRpS9fW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a603", "title": "Proposal for an aerial display using dynamic projection mapping on a distant flying screen", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a603/1MNgKRrqL6g", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D 
User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a583", "title": "Viewpoint Planning of Projector Placement for Spatial Augmented Reality using Star-Kernel Decomposition", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a583/1tnXRSzHELC", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrNh0vs", "title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)", "acronym": "icat", "groupId": "1001485", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNwHhoOM", "doi": "10.1109/ICAT.2013.6728902", "title": "Flying head: A head-synchronization mechanism for flying telepresence", "normalizedTitle": "Flying head: A head-synchronization mechanism for flying telepresence", "abstract": "Flying Head is a telepresence system that remotely connects humans and unmanned aerial vehicles (UAVs). UAVs are teleoperated robots used in various situations, including disaster area inspection and movie content creation. This study aimed to integrate humans and machines with different abilities (i.e., flying) to virtually augment human abilities. Precise manipulation of UAVs normally involves simultaneous control of motion parameters and requires the skill of a trained operator. This paper proposes a new method that directly connects the user's body and head motion to that of the UAV. The user's natural movement can be synchronized with UAV motions such as rotation and horizontal and vertical movements. Users can control the UAV more intuitively since such manipulations are more in accordance with their kinesthetic imagery; in other words, a user can feel as if he or she became a flying machine.", "abstracts": [ { "abstractType": "Regular", "content": "Flying Head is a telepresence system that remotely connects humans and unmanned aerial vehicles (UAVs). UAVs are teleoperated robots used in various situations, including disaster area inspection and movie content creation. This study aimed to integrate humans and machines with different abilities (i.e., flying) to virtually augment human abilities. Precise manipulation of UAVs normally involves simultaneous control of motion parameters and requires the skill of a trained operator. 
This paper proposes a new method that directly connects the user's body and head motion to that of the UAV. The user's natural movement can be synchronized with UAV motions such as rotation and horizontal and vertical movements. Users can control the UAV more intuitively since such manipulations are more in accordance with their kinesthetic imagery; in other words, a user can feel as if he or she became a flying machine.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Flying Head is a telepresence system that remotely connects humans and unmanned aerial vehicles (UAVs). UAVs are teleoperated robots used in various situations, including disaster area inspection and movie content creation. This study aimed to integrate humans and machines with different abilities (i.e., flying) to virtually augment human abilities. Precise manipulation of UAVs normally involves simultaneous control of motion parameters and requires the skill of a trained operator. This paper proposes a new method that directly connects the user's body and head motion to that of the UAV. The user's natural movement can be synchronized with UAV motions such as rotation and horizontal and vertical movements. 
Users can control the UAV more intuitively since such manipulations are more in accordance with their kinesthetic imagery; in other words, a user can feel as if he or she became a flying machine.", "fno": "06728902", "keywords": [ "Head", "Robots", "Cameras", "Magnetic Heads", "Synchronization", "Trajectory", "5 2 Information Interfaces And Presentation User Interfaces User Centered Design" ], "authors": [ { "affiliation": null, "fullName": "Keita Higuchi", "givenName": "Keita", "surname": "Higuchi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Katsuya Fujii", "givenName": "Katsuya", "surname": "Fujii", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Rekimoto", "givenName": "Jun", "surname": "Rekimoto", "__typename": "ArticleAuthorType" } ], "idPrefix": "icat", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-12-01T00:00:00", "pubType": "proceedings", "pages": "28-34", "year": "2013", "issn": null, "isbn": "978-4-904490-11-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06728901", "articleId": "12OmNzd7bDG", "__typename": "AdjacentArticleType" }, "next": { "fno": "06728903", "articleId": "12OmNrHjqIc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/avss/2017/2939/0/08078557", "title": "Flying object detection for automatic UAV recognition", "doi": null, "abstractUrl": "/proceedings-article/avss/2017/08078557/12OmNBVrjpF", "parentPublication": { "id": "proceedings/avss/2017/2939/0", "title": "2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2016/1269/0/07760017", "title": "An expedite group key establishment protocol for Flying Ad-Hoc 
Network(FANET)", "doi": null, "abstractUrl": "/proceedings-article/iciev/2016/07760017/12OmNC0y5Ih", "parentPublication": { "id": "proceedings/iciev/2016/1269/0", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uic-atc/2013/2482/0/06726212", "title": "Endless Flyer: A Continuous Flying Drone with Automatic Battery Replacement", "doi": null, "abstractUrl": "/proceedings-article/uic-atc/2013/06726212/12OmNzaQoAk", "parentPublication": { "id": "proceedings/uic-atc/2013/2482/0", "title": "2013 IEEE 10th International Conference on Ubiquitous Intelligence & Computing and 2013 IEEE 10th International Conference on Autonomic & Trusted Computing (UIC/ATC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/08/07983006", "title": "FlyCap: Markerless Motion Capture Using Multiple Autonomous Flying Cameras", "doi": null, "abstractUrl": "/journal/tg/2018/08/07983006/13rRUxYrbUO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466636", "title": "Superman vs Giant: A Study on Spatial Perception for a Multi-Scale Mixed Reality Flying Telepresence Interface", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466636/14M3DZXcLXa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2021/05/08674543", "title": "Interlocking Safety Cases for Unmanned Autonomous Systems in Shared Airspaces", "doi": null, "abstractUrl": "/journal/ts/2021/05/08674543/18IlAJ3Spdm", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/5555/01/09786610", "title": "Aerial Edge Computing: Flying Attitude-aware Collaboration for Multi-UAV", "doi": null, "abstractUrl": "/journal/tm/5555/01/09786610/1DQPAEJDR4s", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nca/2022/9730/21/10013547", "title": "An analytic hierarchy process (AHP) based UAV selection mechanism for beyond 5G networks", "doi": null, "abstractUrl": "/proceedings-article/nca/2022/10013547/1JZ43s4AygU", "parentPublication": { "id": "proceedings/nca/2022/9730/21", "title": "2022 IEEE 21st International Symposium on Network Computing and Applications (NCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2020/4199/0/09016553", "title": "3D UAV Placement and Trajectory Optimization in UAV Assisted Wireless Networks", "doi": null, "abstractUrl": "/proceedings-article/icoin/2020/09016553/1hQqXBkw0zC", "parentPublication": { "id": "proceedings/icoin/2020/4199/0", "title": "2020 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icii/2019/2977/0/297700a018", "title": "Multi-layered Threat Analysis for Unmanned Aerial Vehicle", "doi": null, "abstractUrl": "/proceedings-article/icii/2019/297700a018/1jXviBSJ86k", "parentPublication": { "id": "proceedings/icii/2019/2977/0", "title": "2019 IEEE International Conference on Industrial Internet (ICII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeQGRM0HLi", "doi": "10.1109/ISMAR-Adjunct54149.2021.00078", "title": "Tactile Telepresence for Isolated Patients", "normalizedTitle": "Tactile Telepresence for Isolated Patients", "abstract": "For isolated patients, such as COVID-19 patients in an intensive care unit, conventional video tools can provide a degree of visual telepresence. However, video alone offers, at best, an approximation of a \"through a window\" metaphor&#x2014;remote visitors, such as loved ones, cannot touch the patient to provide reassurance. Here, we present preliminary work aimed at providing an isolated patient and remote visitors with audiovisual interactions that are augmented by mediated social touch&#x2014;the perception of being touched for the isolated patient, and the perception of touching for the remote visitor. We developed a tactile telepresence system prototype that provides a remote visitor with a tablet-based, touch-video interface for conveying touch patterns on the forehead of an isolated patient. The isolated patient can see the remote visitor, see themselves with the touch patterns indicated on their forehead, and feel the touch patterns through a vibrotactile headband interface. We motivate the work, describe the system prototype, and present results from pilot studies investigating the technical feasibility of the system, along with the social and emotional affects of using the prototype system.", "abstracts": [ { "abstractType": "Regular", "content": "For isolated patients, such as COVID-19 patients in an intensive care unit, conventional video tools can provide a degree of visual telepresence. 
However, video alone offers, at best, an approximation of a \"through a window\" metaphor&#x2014;remote visitors, such as loved ones, cannot touch the patient to provide reassurance. Here, we present preliminary work aimed at providing an isolated patient and remote visitors with audiovisual interactions that are augmented by mediated social touch&#x2014;the perception of being touched for the isolated patient, and the perception of touching for the remote visitor. We developed a tactile telepresence system prototype that provides a remote visitor with a tablet-based, touch-video interface for conveying touch patterns on the forehead of an isolated patient. The isolated patient can see the remote visitor, see themselves with the touch patterns indicated on their forehead, and feel the touch patterns through a vibrotactile headband interface. We motivate the work, describe the system prototype, and present results from pilot studies investigating the technical feasibility of the system, along with the social and emotional affects of using the prototype system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For isolated patients, such as COVID-19 patients in an intensive care unit, conventional video tools can provide a degree of visual telepresence. However, video alone offers, at best, an approximation of a \"through a window\" metaphor—remote visitors, such as loved ones, cannot touch the patient to provide reassurance. Here, we present preliminary work aimed at providing an isolated patient and remote visitors with audiovisual interactions that are augmented by mediated social touch—the perception of being touched for the isolated patient, and the perception of touching for the remote visitor. We developed a tactile telepresence system prototype that provides a remote visitor with a tablet-based, touch-video interface for conveying touch patterns on the forehead of an isolated patient. 
The isolated patient can see the remote visitor, see themselves with the touch patterns indicated on their forehead, and feel the touch patterns through a vibrotactile headband interface. We motivate the work, describe the system prototype, and present results from pilot studies investigating the technical feasibility of the system, along with the social and emotional affects of using the prototype system.", "fno": "129800a346", "keywords": [ "Haptic Interfaces", "Human Factors", "User Interfaces", "Virtual Reality", "Touch Patterns", "COVID 19 Patients", "Window Metaphor Remote Visitors", "Isolated Patient Visitors", "Mediated Social Touch", "Tactile Telepresence System Prototype", "Tablet Based Touch Video Interface", "Visualization", "Telepresence", "Forehead", "Prototypes", "Tools", "Augmented Reality", "Mediated Social Touch", "Telepresence", "Tactile", "Haptic" ], "authors": [ { "affiliation": "University of Central Florida,Orlando,FL,USA", "fullName": "Nafisa Mostofa", "givenName": "Nafisa", "surname": "Mostofa", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida,Orlando,FL,USA", "fullName": "Indira Avendano", "givenName": "Indira", "surname": "Avendano", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida,Orlando,FL,USA", "fullName": "Ryan P. McMahan", "givenName": "Ryan P.", "surname": "McMahan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida,Orlando,FL,USA", "fullName": "Norma E. Conner", "givenName": "Norma E.", "surname": "Conner", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida,Orlando,FL,USA", "fullName": "Mindi Anderson", "givenName": "Mindi", "surname": "Anderson", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Central Florida,Orlando,FL,USA", "fullName": "Gregory F. 
Welch", "givenName": "Gregory F.", "surname": "Welch", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "346-351", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800a340", "articleId": "1yeQMJ5IoX6", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800a352", "articleId": "1yeQKcrGZvG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2008/3381/0/3381a102", "title": "Virtual and Augmented Reality with Head-Tracking for Efficient Teleoperation of Groups of Robots", "doi": null, "abstractUrl": "/proceedings-article/cw/2008/3381a102/12OmNAndinH", "parentPublication": { "id": "proceedings/cw/2008/3381/0", "title": "2008 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a194", "title": "[POSTER] Avatar-Mediated Contact Interaction between Remote Users for Social Telepresence", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a194/12OmNvTTcga", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780058", "title": "Toward a Compelling Sensation of Telepresence: Demonstrating a Portal to a Distant (Static) Office", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780058/12OmNzlD9ll", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2018/02/mcg2018020089", "title": "Beaming into the News: A System for and Case Study of Tele-Immersive Journalism", "doi": null, "abstractUrl": "/magazine/cg/2018/02/mcg2018020089/13rRUIJcWg2", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2021/1658/0/165800b227", "title": "A Secured Real-Time IoMT Application for Monitoring Isolated COVID-19 Patients using Edge Computing", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2021/165800b227/1BBze4oVt5u", "parentPublication": { "id": "proceedings/trustcom/2021/1658/0", "title": "2021 IEEE 20th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a524", "title": "Synthesizing Novel Spaces for Remote Telepresence Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a524/1J7WaFB7xNC", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798152", "title": "The Influence of Size in Augmented Reality Telepresence Avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2019/5341/0/09001827", "title": "Smart Assistive Glasses for Alzheimer&#x0027;s Patients", "doi": 
null, "abstractUrl": "/proceedings-article/isspit/2019/09001827/1hHMjP8EXeg", "parentPublication": { "id": "proceedings/isspit/2019/5341/0", "title": "2019 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090433", "title": "Virtual Tour: An Immersive Low Cost Telepresence System", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090433/1jIxrSY8cZa", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2023/04/09537616", "title": "TIUI: Touching Live Video for Telepresence Operation", "doi": null, "abstractUrl": "/journal/tm/2023/04/09537616/1wTinsFrkju", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNy5hRo2", "doi": "10.1109/VR.2017.7892375", "title": "Experiencing guidance in 3D spaces with a vibrotactile head-mounted display", "normalizedTitle": "Experiencing guidance in 3D spaces with a vibrotactile head-mounted display", "abstract": "Vibrotactile feedback is broadly used to support different tasks in virtual and augmented reality applications, such as navigation, communication, attentional redirection, or to enhance the sense of presence in virtual environments. Thus, we aim to include the haptic component to the most popular wearable used in VR applications: the VR headset. After studying the acuity around the head for vibrating stimuli, and trying different parameters, actuators, and configurations, we developed a haptic guidance technique to be used in a vibrotactile Head-mounted Display (HMD). Our vi-brotactile HMD was made to render the position of objects in a 3D space around the subject by varying both stimulus loci and vibration frequency. In this demonstration, the participants will interact with different scenarios where the mission is to select a number of predefined objects. However, instead of displaying occlusive graphical information to point to these objects, vibrotactile cues will provide guidance in the VR setup. Participants will see that our haptic guidance technique can be both easy to use and entertaining. (See Video: https://youtu.be/_H0MQy6QD7M).", "abstracts": [ { "abstractType": "Regular", "content": "Vibrotactile feedback is broadly used to support different tasks in virtual and augmented reality applications, such as navigation, communication, attentional redirection, or to enhance the sense of presence in virtual environments. 
Thus, we aim to include the haptic component to the most popular wearable used in VR applications: the VR headset. After studying the acuity around the head for vibrating stimuli, and trying different parameters, actuators, and configurations, we developed a haptic guidance technique to be used in a vibrotactile Head-mounted Display (HMD). Our vi-brotactile HMD was made to render the position of objects in a 3D space around the subject by varying both stimulus loci and vibration frequency. In this demonstration, the participants will interact with different scenarios where the mission is to select a number of predefined objects. However, instead of displaying occlusive graphical information to point to these objects, vibrotactile cues will provide guidance in the VR setup. Participants will see that our haptic guidance technique can be both easy to use and entertaining. (See Video: https://youtu.be/_H0MQy6QD7M).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Vibrotactile feedback is broadly used to support different tasks in virtual and augmented reality applications, such as navigation, communication, attentional redirection, or to enhance the sense of presence in virtual environments. Thus, we aim to include the haptic component to the most popular wearable used in VR applications: the VR headset. After studying the acuity around the head for vibrating stimuli, and trying different parameters, actuators, and configurations, we developed a haptic guidance technique to be used in a vibrotactile Head-mounted Display (HMD). Our vi-brotactile HMD was made to render the position of objects in a 3D space around the subject by varying both stimulus loci and vibration frequency. In this demonstration, the participants will interact with different scenarios where the mission is to select a number of predefined objects. 
However, instead of displaying occlusive graphical information to point to these objects, vibrotactile cues will provide guidance in the VR setup. Participants will see that our haptic guidance technique can be both easy to use and entertaining. (See Video: https://youtu.be/_H0MQy6QD7M).", "fno": "07892375", "keywords": [ "Resists", "Haptic Interfaces", "Three Dimensional Displays", "Headphones", "Vibrations", "Navigation", "Human Computer Interaction", "H 5 1 Human Computer Interaction HCI Interaction Devices Haptic Devices", "H 5 2 Human Computer Interaction HCI Interaction Paradigms Virtual Reality" ], "authors": [ { "affiliation": "Universidade Federal do Rio Grande do Sul - UFRGS, Instituto de Informática. Porto Alegre, Brazil", "fullName": "Victor Adriel de Jesus Oliveira", "givenName": "Victor Adriel", "surname": "de Jesus Oliveira", "__typename": "ArticleAuthorType" }, { "affiliation": "Fondazione Istituto Italiano di Tecnologia - IIT, RBCS. Genoa, Italy", "fullName": "Luca Brayda", "givenName": "Luca", "surname": "Brayda", "__typename": "ArticleAuthorType" }, { "affiliation": "Universidade Federal do Rio Grande do Sul - UFRGS, Instituto de Informática. Porto Alegre, Brazil", "fullName": "Luciana Nedel", "givenName": "Luciana", "surname": "Nedel", "__typename": "ArticleAuthorType" }, { "affiliation": "Universidade Federal do Rio Grande do Sul - UFRGS, Instituto de Informática. 
Porto Alegre, Brazil", "fullName": "Anderson Maciel", "givenName": "Anderson", "surname": "Maciel", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "453-454", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892374", "articleId": "12OmNy49sP9", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892376", "articleId": "12OmNC2OSOD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/haptics/2010/6821/0/05444633", "title": "A vibrotactile feedback approach to posture guidance", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444633/12OmNAo45Ki", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273612", "title": "Emotional responses of vibrotactile-thermal stimuli: Effects of constant-temperature thermal stimuli", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273612/12OmNqMPfQu", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446524", "title": "HangerOVER: Development of HMO-Embedded Haptic Display Using the Hanger Reflex and VR Application", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446524/13bd1fdV4l2", "parentPublication": { 
"id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2012/01/tth2012010014", "title": "Vibrotactile Rendering for a Traveling Vibrotactile Wave Based on a Haptic Processor", "doi": null, "abstractUrl": "/journal/th/2012/01/tth2012010014/13rRUEgs2C8", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/04/07829406", "title": "Designing a Vibrotactile Head-Mounted Display for Spatial Awareness in 3D Spaces", "doi": null, "abstractUrl": "/journal/tg/2017/04/07829406/13rRUxDIthh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010013", "title": "Comparison of Visual and Vibrotactile Feedback Methods for Seated Posture Guidance", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010013/13rRUxcKzVp", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/03/07060731", "title": "Vibrotactile Guidance for Wayfinding of Blind Walkers", "doi": null, "abstractUrl": "/journal/th/2015/03/07060731/13rRUy2YLT9", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797956", "title": "Haptic Compass: Active Vibrotactile Feedback of Physical Object for Path Guidance", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797956/1cJ17BLEK88", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual 
Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a042", "title": "Smart Haproxy: A Novel Vibrotactile Feedback Prototype Combining Passive and Active Haptic in AR Interaction", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a042/1gysov56h20", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212865", "title": "Identification of Vibrotactile Flow Patterns on a Handheld Haptic Device", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212865/1nHRQWVTfMc", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJbEwHHqEg", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJc1DGu0Vy", "doi": "10.1109/VR51125.2022.00064", "title": "HoloLogger: Keystroke Inference on Mixed Reality Head Mounted Displays", "normalizedTitle": "HoloLogger: Keystroke Inference on Mixed Reality Head Mounted Displays", "abstract": "When using personal computing services in mixed reality (MR) such as online payment and social media, sensitive information and account passwords must be typed in MR. To design secure MR systems and build up user trust, it is imperative to first understand the security threat to the sensitive MR input. Although keystroke inference attacks by analyzing human-computer interaction in videos or via wireless signals have been successful, they require placing extra hardware near the user which is easily noticeable in practice. In this paper, we expose a more dangerous malware-based attack through the vulnerability that no permission is required for accessing MR motion data. We aim to monitor MR headset motion and infer the user input through a benign App. Realizing the attack system requires addressing unique challenges in MR such as six-degree-of-freedom (6DoF) device motion and no explicit motion signal for keystroke identification. To this end, we present HoloLogger, the first malware-based keystroke inference attack system on HoloLens. HoloLogger is empowered by a 6DoF-head-motion-driven key tracking scheme and an air-tap-pattern-based keystroke inference framework. Extensive evaluations with 25 users and 750 inference trials of passwords consisting of 4&#x2013;8 lowercase English letters demonstrate that HoloLogger successfully achieves a top-5 accuracy of 93%. 
HoloLogger is also robust in various environments such as different user positions and input categories.", "abstracts": [ { "abstractType": "Regular", "content": "When using personal computing services in mixed reality (MR) such as online payment and social media, sensitive information and account passwords must be typed in MR. To design secure MR systems and build up user trust, it is imperative to first understand the security threat to the sensitive MR input. Although keystroke inference attacks by analyzing human-computer interaction in videos or via wireless signals have been successful, they require placing extra hardware near the user which is easily noticeable in practice. In this paper, we expose a more dangerous malware-based attack through the vulnerability that no permission is required for accessing MR motion data. We aim to monitor MR headset motion and infer the user input through a benign App. Realizing the attack system requires addressing unique challenges in MR such as six-degree-of-freedom (6DoF) device motion and no explicit motion signal for keystroke identification. To this end, we present HoloLogger, the first malware-based keystroke inference attack system on HoloLens. HoloLogger is empowered by a 6DoF-head-motion-driven key tracking scheme and an air-tap-pattern-based keystroke inference framework. Extensive evaluations with 25 users and 750 inference trials of passwords consisting of 4&#x2013;8 lowercase English letters demonstrate that HoloLogger successfully achieves a top-5 accuracy of 93%. HoloLogger is also robust in various environments such as different user positions and input categories.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "When using personal computing services in mixed reality (MR) such as online payment and social media, sensitive information and account passwords must be typed in MR. 
To design secure MR systems and build up user trust, it is imperative to first understand the security threat to the sensitive MR input. Although keystroke inference attacks by analyzing human-computer interaction in videos or via wireless signals have been successful, they require placing extra hardware near the user which is easily noticeable in practice. In this paper, we expose a more dangerous malware-based attack through the vulnerability that no permission is required for accessing MR motion data. We aim to monitor MR headset motion and infer the user input through a benign App. Realizing the attack system requires addressing unique challenges in MR such as six-degree-of-freedom (6DoF) device motion and no explicit motion signal for keystroke identification. To this end, we present HoloLogger, the first malware-based keystroke inference attack system on HoloLens. HoloLogger is empowered by a 6DoF-head-motion-driven key tracking scheme and an air-tap-pattern-based keystroke inference framework. Extensive evaluations with 25 users and 750 inference trials of passwords consisting of 4–8 lowercase English letters demonstrate that HoloLogger successfully achieves a top-5 accuracy of 93%. 
HoloLogger is also robust in various environments such as different user positions and input categories.", "fno": "961700a445", "keywords": [ "Augmented Reality", "Helmet Mounted Displays", "Invasive Software", "Mobile Computing", "Pattern Classification", "Security Of Data", "Virtual Reality", "Holo Logger", "6 Do F Head Motion Driven Key Tracking Scheme", "Air Tap Pattern Based Keystroke Inference", "Different User Positions", "Input Categories", "Mixed Reality Head Mounted Displays", "Personal Computing Services", "Online Payment", "Social Media", "Sensitive Information", "Account Passwords", "Secure MR Systems", "User Trust", "Security Threat", "Sensitive MR Input", "Keystroke Inference Attacks", "Human Computer Interaction", "Wireless Signals", "Extra Hardware", "Dangerous Malware Based Attack", "Accessing MR Motion Data", "MR Headset Motion", "Six Degree Of Freedom Device Motion", "Explicit Motion Signal", "Keystroke Identification", "Malware Based Keystroke Inference Attack System", "Wireless Communication", "Wireless Sensor Networks", "Solid Modeling", "Three Dimensional Displays", "Tracking", "Social Networking Online", "Mixed Reality", "Human Centered Computing", "Human Computer Interaction", "Mixed Augmented Reality", "Security And Privacy", "Human And Societal Aspects Of Security And Privacy", "Privacy Protection" ], "authors": [ { "affiliation": "George Mason University", "fullName": "Shiqing Luo", "givenName": "Shiqing", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": "Automatic Data Processing, Inc.", "fullName": "Xinyu Hu", "givenName": "Xinyu", "surname": "Hu", "__typename": "ArticleAuthorType" }, { "affiliation": "George Mason University", "fullName": "Zhisheng Yan", "givenName": "Zhisheng", "surname": "Yan", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": 
"445-454", "year": "2022", "issn": null, "isbn": "978-1-6654-9617-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "961700a436", "articleId": "1CJbVhCZuqA", "__typename": "AdjacentArticleType" }, "next": { "fno": "961700a455", "articleId": "1CJbMNuUHAs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2016/4571/0/4571a399", "title": "Tile Based HEVC Video for Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ism/2016/4571a399/12OmNwF0BRC", "parentPublication": { "id": "proceedings/ism/2016/4571/0", "title": "2016 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2018/4353/0/435301a144", "title": "EyeTell: Video-Assisted Touchscreen Keystroke Inference from Eye Movements", "doi": null, "abstractUrl": "/proceedings-article/sp/2018/435301a144/12OmNzC5SIa", "parentPublication": { "id": "proceedings/sp/2018/4353/0", "title": "2018 IEEE Symposium on Security and Privacy (SP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892302", "title": "Estimating the motion-to-photon latency in head mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892302/12OmNznkKb4", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/07/ttg2011070888", "title": "Natural Perspective Projections for Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2018/11/08456525", "title": "Towards Efficient Visual Guidance in Limited Field-of-View Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2018/11/08456525/14M3DYGRu3n", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699201", "title": "SWAG Demo: Smart Watch Assisted Gesture Interaction for Mixed Reality Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699201/19F1VvOVhew", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a044", "title": "Mixed Reality Tunneling Effects for Stereoscopic Untethered Video-See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a044/1JrR3Kf8QkE", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a414", "title": "Evaluating the Object-Centered User Interface in Head-Worn Mixed Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a414/1JrRiVjEd44", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090681", "title": "Accuracy of Commodity Finger Tracking Systems for Virtual Reality Head-Mounted 
Displays", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090681/1jIxoZtoPlK", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09463728", "title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0J09XMdy", "doi": "10.1109/VR.2019.8797925", "title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays", "normalizedTitle": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays", "abstract": "Wearable VR/AR devices provide users with fully immersive experience in a virtual environment, enabling possibilities to reshape the forms of entertainment and telepresence. While the body language is a crucial element in effective communication, wearing a head-mounted display (HMD) could severely hinder the eye contact and block facial expressions. We present a novel headset removal technique that enables high-quality occlusion-free communication in virtual environment. In particular, our solution synthesizes photoreal faces in the occluded region with faithful reconstruction of facial expressions and eye movements. Towards this goal, we develop a novel capture setup that consists of two near-infrared (NIR) cameras inside the HMD for eye capturing and one external RGB camera for recording visible face regions. To enable realistic face synthesis with consistent illuminations, we propose a data-driven approach to fuse the narrow-field-of-view NIR images with the RGB image captured from the external camera. In addition, to generate pho-torealistic eyes, a dedicated algorithm is proposed to colorize the NIR eye images and further rectify the color distortion caused by the non-linear mapping of IR light sensitivity. 
Experimental results demonstrate that our framework is capable to synthesize high-fidelity unoccluded facial images with accurate tracking of head motion, facial expression and eye movement.", "abstracts": [ { "abstractType": "Regular", "content": "Wearable VR/AR devices provide users with fully immersive experience in a virtual environment, enabling possibilities to reshape the forms of entertainment and telepresence. While the body language is a crucial element in effective communication, wearing a head-mounted display (HMD) could severely hinder the eye contact and block facial expressions. We present a novel headset removal technique that enables high-quality occlusion-free communication in virtual environment. In particular, our solution synthesizes photoreal faces in the occluded region with faithful reconstruction of facial expressions and eye movements. Towards this goal, we develop a novel capture setup that consists of two near-infrared (NIR) cameras inside the HMD for eye capturing and one external RGB camera for recording visible face regions. To enable realistic face synthesis with consistent illuminations, we propose a data-driven approach to fuse the narrow-field-of-view NIR images with the RGB image captured from the external camera. In addition, to generate pho-torealistic eyes, a dedicated algorithm is proposed to colorize the NIR eye images and further rectify the color distortion caused by the non-linear mapping of IR light sensitivity. Experimental results demonstrate that our framework is capable to synthesize high-fidelity unoccluded facial images with accurate tracking of head motion, facial expression and eye movement.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Wearable VR/AR devices provide users with fully immersive experience in a virtual environment, enabling possibilities to reshape the forms of entertainment and telepresence. 
While the body language is a crucial element in effective communication, wearing a head-mounted display (HMD) could severely hinder the eye contact and block facial expressions. We present a novel headset removal technique that enables high-quality occlusion-free communication in virtual environment. In particular, our solution synthesizes photoreal faces in the occluded region with faithful reconstruction of facial expressions and eye movements. Towards this goal, we develop a novel capture setup that consists of two near-infrared (NIR) cameras inside the HMD for eye capturing and one external RGB camera for recording visible face regions. To enable realistic face synthesis with consistent illuminations, we propose a data-driven approach to fuse the narrow-field-of-view NIR images with the RGB image captured from the external camera. In addition, to generate pho-torealistic eyes, a dedicated algorithm is proposed to colorize the NIR eye images and further rectify the color distortion caused by the non-linear mapping of IR light sensitivity. 
Experimental results demonstrate that our framework is capable to synthesize high-fidelity unoccluded facial images with accurate tracking of head motion, facial expression and eye movement.", "fno": "08797925", "keywords": [ "Cameras", "Emotion Recognition", "Face Recognition", "Helmet Mounted Displays", "Image Colour Analysis", "Image Motion Analysis", "Image Reconstruction", "Infrared Imaging", "Object Tracking", "Virtual Reality", "Head Mounted Display", "Virtual Environment", "Entertainment", "Telepresence", "Body Language", "HMD", "Eye Contact", "Block Facial Expressions", "Novel Headset Removal Technique", "High Quality Occlusion Free Communication", "Occluded Region", "Eye Movements", "Novel Capture Setup", "Eye Capturing", "External RGB Camera", "Visible Face Regions", "Realistic Face Synthesis", "Consistent Illuminations", "Narrow Field Of View NIR Images", "RGB Image", "External Camera", "Pho Torealistic Eyes", "NIR Eye Images", "High Fidelity Unoccluded Facial Images", "Head Motion", "Facial Expression", "Eye Movement", "Photoreal Face Synthesis", "Mask Off", "Face Image Synthesis", "Face", "Cameras", "Headphones", "Three Dimensional Displays", "Image Reconstruction", "Resists", "Tracking", "AR VR", "Headset Removal", "Face Inpainting", "Eye Synthesis" ], "authors": [ { "affiliation": "University of Kentucky", "fullName": "Yajie Zhao", "givenName": "Yajie", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "USC Institute for Creative Technologies", "fullName": "Qingguo Xu", "givenName": "Qingguo", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "North Carolina Central University", "fullName": "Weikai Chen", "givenName": "Weikai", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Kentucky", "fullName": "Chao Du", "givenName": "Chao", "surname": "Du", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Kentucky", "fullName": "Jun Xing", "givenName": "Jun", 
"surname": "Xing", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Kentucky", "fullName": "Xinyu Huang", "givenName": "Xinyu", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Kentucky Baidu Inc., Beijing, China", "fullName": "Ruigang Yang", "givenName": "Ruigang", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "267-276", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798154", "articleId": "1cJ0RlqMKmQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797862", "articleId": "1cJ1hkU8yNq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a245", "title": "[POSTER] An Accurate Calibration Method for Optical See-Through Head-Mounted Displays Based on Actual Eye-Observation Model", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a245/12OmNwErpLb", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892245", "title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar/2002/1781/0/17810149", "title": "Diminishing Head-Mounted Display for Shared Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2002/17810149/12OmNy4r3Zp", "parentPublication": { "id": "proceedings/ismar/2002/1781/0", "title": "Proceedings. International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09786815", "title": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display", "doi": null, "abstractUrl": "/journal/tg/5555/01/09786815/1DSumaVNxG8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a477", "title": "Digital Precompensation for Luminance Nonuniformities in Augmented Reality Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a477/1J7WkpqbbYA", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a082", "title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a470", "title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays", "doi": null, 
"abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797852", "title": "Perception of Volumetric Characters&#x0027; Eye-Gaze Direction in Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a001", "title": "Development of Easy Attachable Biological Information Measurement Device for Various Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a001/1fHkmnjJYru", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a011", "title": "Edge-Guided Near-Eye Image Analysis for Head Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a011/1yeCW4N7Y9a", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0XW5LFNC", "doi": "10.1109/VR.2019.8797725", "title": "Comparing Techniques for Visualizing Moving Out-of-View Objects in Head-mounted Virtual Reality", "normalizedTitle": "Comparing Techniques for Visualizing Moving Out-of-View Objects in Head-mounted Virtual Reality", "abstract": "Current head-mounted displays (HMDs) have a limited field-of-view (FOV). A limited FOV further decreases the already restricted human visual range and amplifies the problem of objects receding from view (e.g., opponents in computer games). However, there is no previous work that investigates how to best perceive moving out-of-view objects on head-mounted displays. In this paper, we compare two visualization approaches: (1) Overview+detail, with 3D Radar, and (2) Focus+context, with EyeSee360, in a user study to evaluate their performances for visualizing moving out-of-view objects. We found that using 3D Radar resulted in a significantly lower movement estimation error and higher usability, measured by the system usability scale. 3D Radar was also preferred by 13 out of 15 participants for visualization of moving out-of-view objects.", "abstracts": [ { "abstractType": "Regular", "content": "Current head-mounted displays (HMDs) have a limited field-of-view (FOV). A limited FOV further decreases the already restricted human visual range and amplifies the problem of objects receding from view (e.g., opponents in computer games). However, there is no previous work that investigates how to best perceive moving out-of-view objects on head-mounted displays. 
In this paper, we compare two visualization approaches: (1) Overview+detail, with 3D Radar, and (2) Focus+context, with EyeSee360, in a user study to evaluate their performances for visualizing moving out-of-view objects. We found that using 3D Radar resulted in a significantly lower movement estimation error and higher usability, measured by the system usability scale. 3D Radar was also preferred by 13 out of 15 participants for visualization of moving out-of-view objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Current head-mounted displays (HMDs) have a limited field-of-view (FOV). A limited FOV further decreases the already restricted human visual range and amplifies the problem of objects receding from view (e.g., opponents in computer games). However, there is no previous work that investigates how to best perceive moving out-of-view objects on head-mounted displays. In this paper, we compare two visualization approaches: (1) Overview+detail, with 3D Radar, and (2) Focus+context, with EyeSee360, in a user study to evaluate their performances for visualizing moving out-of-view objects. We found that using 3D Radar resulted in a significantly lower movement estimation error and higher usability, measured by the system usability scale. 
3D Radar was also preferred by 13 out of 15 participants for visualization of moving out-of-view objects.", "fno": "08797725", "keywords": [ "Data Visualisation", "Helmet Mounted Displays", "Virtual Reality", "Visualization Approaches", "3 D Radar", "Out Of View Objects", "Head Mounted Virtual Reality", "Field Of View", "FOV", "Restricted Human Visual Range", "Head Mounted Displays", "Eye See 360", "Moving Out Of View Object Visualization", "System Usability Scale", "Visualization", "Three Dimensional Displays", "Radar", "Games", "Estimation Error", "Usability", "Meters", "Human Centered Computing X 2014 Visualization X 2014 Visualization Techniques", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Empirical Studies In HCI" ], "authors": [ { "affiliation": "University of Oldenburg", "fullName": "Uwe Gruenefeld", "givenName": "Uwe", "surname": "Gruenefeld", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Oldenburg", "fullName": "Ilja Koethe", "givenName": "Ilja", "surname": "Koethe", "__typename": "ArticleAuthorType" }, { "affiliation": "OFFIS - Institute for IT", "fullName": "Daniel Lange", "givenName": "Daniel", "surname": "Lange", "__typename": "ArticleAuthorType" }, { "affiliation": "OFFIS - Institute for IT", "fullName": "Sebastian Weiß", "givenName": "Sebastian", "surname": "Weiß", "__typename": "ArticleAuthorType" }, { "affiliation": "OFFIS - Institute for IT", "fullName": "Wilko Heuten", "givenName": "Wilko", "surname": "Heuten", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "742-746", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798135", "articleId": "1cJ0KovI6wo", "__typename": "AdjacentArticleType" }, 
"next": { "fno": "08797871", "articleId": "1cJ0IeMn35S", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2009/3965/0/04811230", "title": "Poster: A virtual walkthrough system with a wide field-of-view stereo head mounted projective display", "doi": null, "abstractUrl": "/proceedings-article/3dui/2009/04811230/12OmNAJVcFm", "parentPublication": { "id": "proceedings/3dui/2009/3965/0", "title": "2009 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402574", "title": "Occlusion capable optical see-through head-mounted display using freeform optics", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2007/1749/0/04538848", "title": "A Wide Field-of-view Head Mounted Projective Display using Hyperbolic Half-silvered Mirrors", "doi": null, "abstractUrl": "/proceedings-article/ismar/2007/04538848/12OmNBv2Ciz", "parentPublication": { "id": "proceedings/ismar/2007/1749/0", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2018/2290/0/08343240", "title": "A probability model of stationary object using radar time-frequency spectrum", "doi": null, "abstractUrl": "/proceedings-article/icoin/2018/08343240/12OmNC2OSLy", "parentPublication": { "id": "proceedings/icoin/2018/2290/0", "title": "2018 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/3dui/2008/2047/0/04476604", "title": "Poster: Sliding Viewport for Head Mounted Displays in Interactive Environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476604/12OmNzdoMAW", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08456525", "title": "Towards Efficient Visual Guidance in Limited Field-of-View Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2018/11/08456525/14M3DYGRu3n", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a116", "title": "Understanding Head-Mounted Display FOV in Maritime Search and Rescue Object Detection", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a116/17D45Xi9rWr", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a139", "title": "Effects of Field of View on Dynamic Out-of-View Target Search in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a139/1CJct7k4IQU", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a301", "title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc", "parentPublication": { "id": 
"proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismii/2021/1290/0/129000a222", "title": "Research on Railway Obstacle Detection Method Based on Radar", "doi": null, "abstractUrl": "/proceedings-article/ismii/2021/129000a222/1sZ2OhXONEc", "parentPublication": { "id": "proceedings/ismii/2021/1290/0", "title": "2021 7th International Symposium on Mechatronics and Industrial Informatics (ISMII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNyYDDME", "doi": "10.1109/VR.2017.7892243", "title": "Optimizing placement of commodity depth cameras for known 3D dynamic scene capture", "normalizedTitle": "Optimizing placement of commodity depth cameras for known 3D dynamic scene capture", "abstract": "Commodity depth cameras, such as the Microsoft Kinect®, have been widely used for the capture and reconstruction of the 3D structure of room-sized dynamic scenes. Camera placement and coverage during capture significantly impact the quality of the resulting reconstruction. In particular, dynamic occlusions and sensor interference have been shown to result in poor resolution and holes in the reconstruction results. This paper presents a novel algorithmic framework and a method for off-line optimization of depth cameras placements for a given 3D dynamic scene, simulated using virtual 3D models. We derive a fitness metric for a particular configuration of sensors by combining factors such as visibility and resolution of the entire dynamic scene with probabilities of interference between sensors. We employ this fitness metric both in a greedy algorithm that determines the number of depth cameras needed to cover the scene, and in a simulated annealing algorithm that optimizes the placements of those sensors. We compare our algorithm's optimized placements with manual sensor placements for a real dynamic scene. 
We present quantitative assessments using our fitness metric, as well as qualitative assessments to demonstrate that our algorithm not only enhances the resolution and total coverage of the reconstruction, but also fills in voids by avoiding occlusions and sensor interference when compared with the reconstruction of the same scene using manual sensor placement.", "abstracts": [ { "abstractType": "Regular", "content": "Commodity depth cameras, such as the Microsoft Kinect®, have been widely used for the capture and reconstruction of the 3D structure of room-sized dynamic scenes. Camera placement and coverage during capture significantly impact the quality of the resulting reconstruction. In particular, dynamic occlusions and sensor interference have been shown to result in poor resolution and holes in the reconstruction results. This paper presents a novel algorithmic framework and a method for off-line optimization of depth cameras placements for a given 3D dynamic scene, simulated using virtual 3D models. We derive a fitness metric for a particular configuration of sensors by combining factors such as visibility and resolution of the entire dynamic scene with probabilities of interference between sensors. We employ this fitness metric both in a greedy algorithm that determines the number of depth cameras needed to cover the scene, and in a simulated annealing algorithm that optimizes the placements of those sensors. We compare our algorithm's optimized placements with manual sensor placements for a real dynamic scene. 
We present quantitative assessments using our fitness metric, as well as qualitative assessments to demonstrate that our algorithm not only enhances the resolution and total coverage of the reconstruction, but also fills in voids by avoiding occlusions and sensor interference when compared with the reconstruction of the same scene using manual sensor placement.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Commodity depth cameras, such as the Microsoft Kinect®, have been widely used for the capture and reconstruction of the 3D structure of room-sized dynamic scenes. Camera placement and coverage during capture significantly impact the quality of the resulting reconstruction. In particular, dynamic occlusions and sensor interference have been shown to result in poor resolution and holes in the reconstruction results. This paper presents a novel algorithmic framework and a method for off-line optimization of depth cameras placements for a given 3D dynamic scene, simulated using virtual 3D models. We derive a fitness metric for a particular configuration of sensors by combining factors such as visibility and resolution of the entire dynamic scene with probabilities of interference between sensors. We employ this fitness metric both in a greedy algorithm that determines the number of depth cameras needed to cover the scene, and in a simulated annealing algorithm that optimizes the placements of those sensors. We compare our algorithm's optimized placements with manual sensor placements for a real dynamic scene. 
We present quantitative assessments using our fitness metric, as well as qualitative assessments to demonstrate that our algorithm not only enhances the resolution and total coverage of the reconstruction, but also fills in voids by avoiding occlusions and sensor interference when compared with the reconstruction of the same scene using manual sensor placement.", "fno": "07892243", "keywords": [ "Cameras", "Three Dimensional Displays", "Surface Reconstruction", "Measurement", "Heuristic Algorithms", "Solid Modeling", "Computational Modeling", "G 1 6 Numerical Analysis Optimization Global Optimization", "Simulated Annealing", "I 4 8 Computing Methodologies Image Processing And Computer Vision Reconstruction", "Scene Analysis", "I 6 3 Computing Methodologies Simulation And Modeling Applications", "Model Development" ], "authors": [ { "affiliation": "University of North Carolina at Chapel Hill, USA", "fullName": "Rohan Chabra", "givenName": "Rohan", "surname": "Chabra", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill, USA", "fullName": "Adrian Ilie", "givenName": "Adrian", "surname": "Ilie", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill, USA", "fullName": "Nicholas Rewkowski", "givenName": "Nicholas", "surname": "Rewkowski", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill, USA", "fullName": "Young-Woon Cha", "givenName": "Young-Woon", "surname": "Cha", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill, USA", "fullName": "Henry Fuchs", "givenName": "Henry", "surname": "Fuchs", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "157-166", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", 
"notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892242", "articleId": "12OmNqIQSkJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892244", "articleId": "12OmNwlHSSf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismarw/2016/3740/0/07836456", "title": "Streaming and Exploration of Dynamically Changing Dense 3D Reconstructions in Immersive Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836456/12OmNAtK4kY", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802048", "title": "Temporally enhanced 3D capture of room-sized dynamic scenes with commodity depth cameras", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802048/12OmNBCqbIs", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284846", "title": "Model-Based Markerless Human Body Motion Capture using Multiple Cameras", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284846/12OmNvmXJ37", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671769", "title": "Scanning and tracking dynamic objects with commodity depth cameras", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671769/12OmNxymo5C", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium 
on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcabes/2015/6593/0/6593a352", "title": "Quick Capture and Reconstruction for 3D Head", "doi": null, "abstractUrl": "/proceedings-article/dcabes/2015/6593a352/12OmNyUnEKB", "parentPublication": { "id": "proceedings/dcabes/2015/6593/0", "title": "2015 14th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/08/07983006", "title": "FlyCap: Markerless Motion Capture Using Multiple Autonomous Flying Cameras", "doi": null, "abstractUrl": "/journal/tg/2018/08/07983006/13rRUxYrbUO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08458443", "title": "Towards Fully Mobile 3D Face, Body, and Environment Capture Using Only Head-worn Cameras", "doi": null, "abstractUrl": "/journal/tg/2018/11/08458443/14M3E0YMV5C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g219", "title": "DeepMultiCap: Performance Capture of Multiple Characters Using Sparse Multiview Cameras", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g219/1BmEybxUSnC", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a001", "title": "MoCapDeform: Monocular 3D Human Motion Capture in Deformable Scenes", "doi": null, "abstractUrl": 
"/proceedings-article/3dv/2022/567000a001/1KYso7Sd0Zy", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/05/08910408", "title": "Superpixel Soup: Monocular Dense 3D Reconstruction of a Complex Dynamic Scene", "doi": null, "abstractUrl": "/journal/tp/2021/05/08910408/1faptKO9nMI", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuAWRhj1Uk", "doi": "10.1109/VR50410.2021.00087", "title": "Mobile. Egocentric Human Body Motion Reconstruction Using Only Eyeglasses-mounted Cameras and a Few Body-worn Inertial Sensors", "normalizedTitle": "Mobile. Egocentric Human Body Motion Reconstruction Using Only Eyeglasses-mounted Cameras and a Few Body-worn Inertial Sensors", "abstract": "We envision a convenient telepresence system available to users anywhere, anytime. Such a system requires displays and sensors embedded in commonly worn items such as eyeglasses, wristwatches, and shoes. To that end, we present a standalone real-time system for the dynamic 3D capture of a person, relying only on cameras embedded into a head-worn device, and on Inertial Measurement Units (IMUs) worn on the wrists and ankles. Our prototype system egocentrically reconstructs the wearer's motion via learning-based pose estimation, which fuses inputs from visual and inertial sensors that complement each other, overcoming challenges such as inconsistent limb visibility in head-worn views, as well as pose ambiguity from sparse IMUs. The estimated pose is continuously re-targeted to a prescanned surface model, resulting in a high-fidelity 3D reconstruction. We demonstrate our system by reconstructing various human body movements and show that our visual-inertial learning-based method, which runs in real time, outperforms both visual-only and inertial-only approaches. 
We captured an egocentric visual-inertial 3D human pose dataset publicly available at https://sites.google.com/site/youngwooncha/egovip for training and evaluating similar methods.", "abstracts": [ { "abstractType": "Regular", "content": "We envision a convenient telepresence system available to users anywhere, anytime. Such a system requires displays and sensors embedded in commonly worn items such as eyeglasses, wristwatches, and shoes. To that end, we present a standalone real-time system for the dynamic 3D capture of a person, relying only on cameras embedded into a head-worn device, and on Inertial Measurement Units (IMUs) worn on the wrists and ankles. Our prototype system egocentrically reconstructs the wearer's motion via learning-based pose estimation, which fuses inputs from visual and inertial sensors that complement each other, overcoming challenges such as inconsistent limb visibility in head-worn views, as well as pose ambiguity from sparse IMUs. The estimated pose is continuously re-targeted to a prescanned surface model, resulting in a high-fidelity 3D reconstruction. We demonstrate our system by reconstructing various human body movements and show that our visual-inertial learning-based method, which runs in real time, outperforms both visual-only and inertial-only approaches. We captured an egocentric visual-inertial 3D human pose dataset publicly available at https://sites.google.com/site/youngwooncha/egovip for training and evaluating similar methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We envision a convenient telepresence system available to users anywhere, anytime. Such a system requires displays and sensors embedded in commonly worn items such as eyeglasses, wristwatches, and shoes. To that end, we present a standalone real-time system for the dynamic 3D capture of a person, relying only on cameras embedded into a head-worn device, and on Inertial Measurement Units (IMUs) worn on the wrists and ankles. 
Our prototype system egocentrically reconstructs the wearer's motion via learning-based pose estimation, which fuses inputs from visual and inertial sensors that complement each other, overcoming challenges such as inconsistent limb visibility in head-worn views, as well as pose ambiguity from sparse IMUs. The estimated pose is continuously re-targeted to a prescanned surface model, resulting in a high-fidelity 3D reconstruction. We demonstrate our system by reconstructing various human body movements and show that our visual-inertial learning-based method, which runs in real time, outperforms both visual-only and inertial-only approaches. We captured an egocentric visual-inertial 3D human pose dataset publicly available at https://sites.google.com/site/youngwooncha/egovip for training and evaluating similar methods.", "fno": "255600a616", "keywords": [ "Cameras", "Image Motion Analysis", "Image Reconstruction", "Learning Artificial Intelligence", "Pose Estimation", "Telecontrol", "Virtual Reality", "High Fidelity 3 D Reconstruction", "Human Body Movements", "Visual Inertial Learning Based Method", "Inertial Only Approaches", "Visual Inertial 3 D Human", "Egocentric Human Body Motion Reconstruction", "Eyeglasses Mounted Cameras", "Body Worn Inertial Sensors", "Telepresence System", "Commonly Worn Items", "Standalone Real Time System", "Dynamic 3 D Capture", "Head Worn Device", "Inertial Measurement Units", "Visual Sensors", "Inconsistent Limb Visibility", "Head Worn Views", "Sparse IMU", "Body Worn Inertial Sensors", "Wearer Motion", "Learning Based Pose Estimation", "Inertial Sensors", "Prescanned Surface Model", "Visual Only Approaches", "Https Sites Google Com Site Youngwooncha Egovip", "Wrist", "Three Dimensional Displays", "Telepresence", "Inertial Sensors", "Pose Estimation", "Prototypes", "Virtual Reality", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality", "Computing Methodologies Computer Graphics Animation Motion 
Capture", "Computing Methodologies Artificial Intelligence Computer Vision Reconstruction", "Computing Methodologies Machine Learning Machine Learning Approaches Neural Networks" ], "authors": [ { "affiliation": "University of North Carolina,Department of Computer Science,Chapel Hill", "fullName": "Young-Woon Cha", "givenName": "Young-Woon", "surname": "Cha", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina,Department of Computer Science,Chapel Hill", "fullName": "Husam Shaik", "givenName": "Husam", "surname": "Shaik", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina,Department of Computer Science,Chapel Hill", "fullName": "Qian Zhang", "givenName": "Qian", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina,Department of Computer Science,Chapel Hill", "fullName": "Fan Feng", "givenName": "Fan", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "InnerOptic Technology, Inc.", "fullName": "Andrei State", "givenName": "Andrei", "surname": "State", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina,Department of Computer Science,Chapel Hill", "fullName": "Adrian Ilie", "givenName": "Adrian", "surname": "Ilie", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina,Department of Computer Science,Chapel Hill", "fullName": "Henry Fuchs", "givenName": "Henry", "surname": "Fuchs", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "616-625", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tuAVNsm2QM", "name": "pvr202118380-09417771s1-mm_255600a616.zip", "size": "199 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417771s1-mm_255600a616.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "255600a606", "articleId": "1tuAGHy2cQ8", "__typename": "AdjacentArticleType" }, "next": { "fno": "255600a626", "articleId": "1tuAz6T7lXG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2015/6759/0/07301361", "title": "Off-the-shelf sensor integration for mono-SLAM on smart devices", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301361/12OmNqFrGwj", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bsn/2010/4065/0/4065a207", "title": "EcoIMU: A Dual Triaxial-Accelerometer Inertial Measurement Unit for Wearable Applications", "doi": null, "abstractUrl": "/proceedings-article/bsn/2010/4065a207/12OmNrAdsCX", "parentPublication": { "id": "proceedings/bsn/2010/4065/0", "title": "Wearable and Implantable Body Sensor Networks, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2012/4697/0/4697a114", "title": "Inertial Body-Worn Sensor Data Segmentation by Boosting Threshold-Based Detectors", "doi": null, "abstractUrl": "/proceedings-article/iswc/2012/4697a114/12OmNx7XGZJ", "parentPublication": { "id": "proceedings/iswc/2012/4697/0", "title": "2012 16th International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840b105", "title": "Real-Time Body Tracking with One Depth Camera and Inertial Sensors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840b105/12OmNyyO8GX", "parentPublication": { "id": 
"proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2015/1725/0/07151686", "title": "Portable virtual reality: Inertial measurements and biomechanics", "doi": null, "abstractUrl": "/proceedings-article/wevr/2015/07151686/12OmNzGDsMi", "parentPublication": { "id": "proceedings/wevr/2015/1725/0", "title": "2015 IEEE 1st Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2017/4338/0/07917646", "title": "Inferring smartphone keypress via smartwatch inertial sensing", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2017/07917646/19wAME7Fui4", "parentPublication": { "id": "proceedings/percom-workshops/2017/4338/0", "title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartcomp/2022/8152/0/815200a329", "title": "Accurate Horse Gait Event Estimation Using an Inertial Sensor Mounted on Different Body Locations", "doi": null, "abstractUrl": "/proceedings-article/smartcomp/2022/815200a329/1F0gFsw6yRi", "parentPublication": { "id": "proceedings/smartcomp/2022/8152/0", "title": "2022 IEEE International Conference on Smart Computing (SMARTCOMP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a128", "title": "Real-time Joint Angle Estimation using Mediapipe Framework and Inertial Sensors", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a128/1J6hJGkP4nS", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2021/04/08937008", "title": "Deep Neural Network Based Inertial Odometry Using Low-Cost Inertial Measurement Units", "doi": null, "abstractUrl": "/journal/tm/2021/04/08937008/1fTdV3t6L9m", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1HYvd6ycGYg", "title": "2022 18th European Dependable Computing Conference (EDCC)", "acronym": "edcc", "groupId": "1001308", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1HYvh8qjW5q", "doi": "10.1109/EDCC57035.2022.00004", "title": "Message from General Chairs", "normalizedTitle": "Message from General Chairs", "abstract": "Presents the conference keynote speech or messages from conference chairs.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the conference keynote speech or messages from conference chairs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the conference keynote speech or messages from conference chairs.", "fno": "740200z008", "keywords": [], "authors": [], "idPrefix": "edcc", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2022-09-01T00:00:00", "pubType": "proceedings", "pages": "8-9", "year": "2022", "issn": "2641-810X", "isbn": "978-1-6654-7402-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "740200z005", "articleId": "1HYve2mpEoU", "__typename": "AdjacentArticleType" }, "next": { "fno": "740200z010", "articleId": "1HYvdgWPJmM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acsos/2022/7137/0/713700z008", "title": "Message from the General Chairs", "doi": null, "abstractUrl": "/proceedings-article/acsos/2022/713700z008/1I1P1npe3cc", "parentPublication": { "id": "proceedings/acsos/2022/7137/0", "title": "2022 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acsos-c/2022/5142/0/514200z009", "title": "Message from the General Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/acsos-c/2022/514200z009/1I1PhTi3OGA", "parentPublication": { "id": "proceedings/acsos-c/2022/5142/0", "title": "2022 IEEE International Conference on Autonomic Computing and Self-Organizing Systems Companion (ACSOS-C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aike/2022/7120/0/712000z008", "title": "Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/aike/2022/712000z008/1IbQn0yieek", "parentPublication": { "id": "proceedings/aike/2022/7120/0", "title": "2022 IEEE Fifth International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdiime/2022/9009/0/900900z010", "title": "Message from the General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdiime/2022/900900z010/1Iz55IhnkWc", "parentPublication": { "id": "proceedings/icdiime/2022/9009/0", "title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloudcom/2022/6367/0/636700z009", "title": "Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/cloudcom/2022/636700z009/1JNqJBptkL6", "parentPublication": { "id": "proceedings/cloudcom/2022/6367/0", "title": "2022 IEEE International Conference on Cloud Computing Technology and Science (CloudCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mass/2022/7180/0/718000z020", "title": "Message from the General Chairs: MASS 2022", "doi": null, "abstractUrl": "/proceedings-article/mass/2022/718000z020/1JeEkJ946FG", "parentPublication": { "id": "proceedings/mass/2022/7180/0", "title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbac-pad/2022/5155/0/515500z011", "title": "Message from the General Chairs", "doi": null, "abstractUrl": "/proceedings-article/sbac-pad/2022/515500z011/1JgrDioI3bG", "parentPublication": { "id": "proceedings/sbac-pad/2022/5155/0", "title": "2022 IEEE 34th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500z011", "title": "Message from the ISMAR 2022 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500z011/1JrQPtPjwfC", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2022/7172/0/717200z013", "title": "Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ism/2022/717200z013/1KaHKBvH38c", "parentPublication": { "id": "proceedings/ism/2022/7172/0", "title": "2022 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2022/7260/0/726000z016", "title": "Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/irc/2022/726000z016/1KckoeflEEE", "parentPublication": { "id": "proceedings/irc/2022/7260/0", "title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pK4LJdu6Na", "title": "2020 IEEE World Congress on Services (SERVICES)", "acronym": "services", "groupId": "1800492", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pK4NaARi4E", "doi": "10.1109/SERVICES48979.2020.00008", "title": "Message from General Chairs of IEEE AISA 2020", "normalizedTitle": "Message from General Chairs of IEEE AISA 2020", "abstract": "Message from General Chairs of IEEE AISA 2020", "abstracts": [ { "abstractType": "Regular", "content": "Message from General Chairs of IEEE AISA 2020", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Message from General Chairs of IEEE AISA 2020", "fno": "820300z024", "keywords": [], "authors": [ { "affiliation": "Beihang University, AISA General Co-Chair", "fullName": "Wenjun Wu", "givenName": "Wenjun", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University, AISA General Co-Chair", "fullName": "Huajun Chen", "givenName": "Huajun", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "services", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2020-10-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2020", "issn": null, "isbn": "978-1-7281-8203-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "820300z023", "articleId": "1pK4Oh1BDgY", "__typename": "AdjacentArticleType" }, "next": { "fno": "820300z025", "articleId": "1pK4OHZdogU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/edge/2020/8254/0/825400z010", "title": "IEEE 2020 World Congress on Services Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/edge/2020/825400z010/1pDrgVvZ8Pu", "parentPublication": { "id": 
"proceedings/edge/2020/8254/0", "title": "2020 IEEE International Conference on Edge Computing (EDGE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edge/2020/8254/0/825400z022", "title": "IEEE International Conference on Edge Computing (EDGE 2020) Message from the Chairs", "doi": null, "abstractUrl": "/proceedings-article/edge/2020/825400z022/1pDrjxMzY08", "parentPublication": { "id": "proceedings/edge/2020/8254/0", "title": "2020 IEEE International Conference on Edge Computing (EDGE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2020/8203/0/820300z015", "title": "IEEE 2020 World Congress on Services Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z015/1pK4N7d5IWY", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-seet/2020/7124/0/712400z008", "title": "Message from the ICSE 2020 General co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icse-seet/2020/712400z008/1pVH7f1Cudy", "parentPublication": { "id": "proceedings/icse-seet/2020/7124/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Software Engineering Education and Training (ICSE-SEET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z040", "title": "Message from the IEEE SmartData 2020 Steering Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z040/1pVHhsxbdMQ", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of 
Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z039", "title": "Message from the IEEE SmartData 2020 Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z039/1pVHllaLxxS", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z043", "title": "Message from the iThings 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z043/1pVHm7nJFde", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z038", "title": "Message from the IEEE SmartData 2020 General Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z038/1pVHmlAfmHm", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cogmi/2020/4144/0/414400z009", "title": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "doi": null, "abstractUrl": "/proceedings-article/cogmi/2020/414400z009/1qyxQxjJQoU", "parentPublication": { "id": "proceedings/cogmi/2020/4144/0", "title": "2020 IEEE Second International Conference on Cognitive Machine Intelligence (CogMI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600z027", "title": "Message from the ICDM 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600z027/1r54zo9qwco", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pVHfYdzyZW", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "acronym": "ithings-greencom-cpscom-smartdata-cybermatics", "groupId": "1800308", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pVHhDiwaAw", "doi": "10.1109/iThings-GreenCom-CPSCom-SmartData-Cybermatics50389.2020.00010", "title": "Message from the GreenCom 2020 General Chairs and Program Chairs", "normalizedTitle": "Message from the GreenCom 2020 General Chairs and Program Chairs", "abstract": "Message from the GreenCom 2020 General Chairs and Program Chairs.", "abstracts": [ { "abstractType": "Regular", "content": "Message from the GreenCom 2020 General Chairs and Program Chairs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Message from the GreenCom 2020 General Chairs and Program Chairs.", "fno": "764700z035", "keywords": [], "authors": [], "idPrefix": "ithings-greencom-cpscom-smartdata-cybermatics", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "35-35", "year": "2020", "issn": null, "isbn": "978-1-7281-7647-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "764700z031", "articleId": "1pVHnq0nNNm", "__typename": "AdjacentArticleType" }, "next": { "fno": "764700z036", "articleId": "1pVHkB83Gp2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/services/2020/8203/0/820300z022", "title": "IEEE 2020 World Congress on Services Welcome Message from Workshops Program Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/services/2020/820300z022/1pK4MPQINIk", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2020/8203/0/820300z024", "title": "Message from General Chairs of IEEE AISA 2020", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z024/1pK4NaARi4E", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2020/8786/0/878600z020", "title": "Message from the SERVICES 2020 Program Chairs in Chief", "doi": null, "abstractUrl": "/proceedings-article/icws/2020/878600z020/1pLJIRcYts4", "parentPublication": { "id": "proceedings/icws/2020/8786/0", "title": "2020 IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-seet/2020/7124/0/712400z008", "title": "Message from the ICSE 2020 General co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icse-seet/2020/712400z008/1pVH7f1Cudy", "parentPublication": { "id": "proceedings/icse-seet/2020/7124/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Software Engineering Education and Training (ICSE-SEET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z039", "title": "Message from the IEEE SmartData 2020 Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z039/1pVHllaLxxS", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on 
Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z044", "title": "Message from the iThings 2020 Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z044/1pVHlp42A6I", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z043", "title": "Message from the iThings 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z043/1pVHm7nJFde", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z028", "title": "Message from the CPSCom 2020 Program Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z028/1pVHmb8d8K4", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cogmi/2020/4144/0/414400z009", "title": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "doi": null, "abstractUrl": "/proceedings-article/cogmi/2020/414400z009/1qyxQxjJQoU", "parentPublication": { "id": "proceedings/cogmi/2020/4144/0", "title": "2020 IEEE Second International Conference on Cognitive Machine Intelligence (CogMI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600z027", "title": "Message from the ICDM 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600z027/1r54zo9qwco", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pVHfYdzyZW", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "acronym": "ithings-greencom-cpscom-smartdata-cybermatics", "groupId": "1800308", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pVHlEea2Uo", "doi": "10.1109/iThings-GreenCom-CPSCom-SmartData-Cybermatics50389.2020.00005", "title": "Message from the CPSCom 2020 General Chairs", "normalizedTitle": "Message from the CPSCom 2020 General Chairs", "abstract": "Message from the CPSCom 2020 General Chairs.", "abstracts": [ { "abstractType": "Regular", "content": "Message from the CPSCom 2020 General Chairs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Message from the CPSCom 2020 General Chairs.", "fno": "764700z027", "keywords": [], "authors": [], "idPrefix": "ithings-greencom-cpscom-smartdata-cybermatics", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "27-27", "year": "2020", "issn": null, "isbn": "978-1-7281-7647-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "764700z026", "articleId": "1pVHiMBwBtm", "__typename": "AdjacentArticleType" }, "next": { "fno": "764700z028", "articleId": "1pVHmb8d8K4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/services/2020/8203/0/820300z024", "title": "Message from General Chairs of IEEE AISA 2020", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z024/1pK4NaARi4E", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on 
Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2020/8786/0/878600z018", "title": "Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icws/2020/878600z018/1pLJJbxhsYw", "parentPublication": { "id": "proceedings/icws/2020/8786/0", "title": "2020 IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z035", "title": "Message from the GreenCom 2020 General Chairs and Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z035/1pVHhDiwaAw", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z030", "title": "Message from the CPSCom 2020 Steering Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z030/1pVHi0FApBS", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z043", "title": "Message from the iThings 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z043/1pVHm7nJFde", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z028", "title": "Message from the CPSCom 2020 Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z028/1pVHmb8d8K4", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z038", "title": "Message from the IEEE SmartData 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z038/1pVHmlAfmHm", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) 
and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z029", "title": "Message from the CPSCom 2020 Special Session Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z029/1pVHpmaFoOI", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cogmi/2020/4144/0/414400z009", "title": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "doi": null, "abstractUrl": "/proceedings-article/cogmi/2020/414400z009/1qyxQxjJQoU", "parentPublication": { "id": "proceedings/cogmi/2020/4144/0", "title": "2020 IEEE Second International Conference on Cognitive Machine Intelligence (CogMI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600z027", "title": "Message from the ICDM 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600z027/1r54zo9qwco", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pVHfYdzyZW", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "acronym": "ithings-greencom-cpscom-smartdata-cybermatics", "groupId": "1800308", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pVHm7nJFde", "doi": "10.1109/iThings-GreenCom-CPSCom-SmartData-Cybermatics50389.2020.00016", "title": "Message from the iThings 2020 General Chairs", "normalizedTitle": "Message from the iThings 2020 General Chairs", "abstract": "Message from the iThings 2020 General Chairs.", "abstracts": [ { "abstractType": "Regular", "content": "Message from the iThings 2020 General Chairs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Message from the iThings 2020 General Chairs.", "fno": "764700z043", "keywords": [], "authors": [], "idPrefix": "ithings-greencom-cpscom-smartdata-cybermatics", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "43-43", "year": "2020", "issn": null, "isbn": "978-1-7281-7647-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "764700z041", "articleId": "1pVHlPnliDu", "__typename": "AdjacentArticleType" }, "next": { "fno": "764700z044", "articleId": "1pVHlp42A6I", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/services/2020/8203/0/820300z024", "title": "Message from General Chairs of IEEE AISA 2020", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z024/1pK4NaARi4E", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress 
on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2020/8786/0/878600z018", "title": "Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icws/2020/878600z018/1pLJJbxhsYw", "parentPublication": { "id": "proceedings/icws/2020/8786/0", "title": "2020 IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-seet/2020/7124/0/712400z008", "title": "Message from the ICSE 2020 General co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icse-seet/2020/712400z008/1pVH7f1Cudy", "parentPublication": { "id": "proceedings/icse-seet/2020/7124/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Software Engineering Education and Training (ICSE-SEET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z035", "title": "Message from the GreenCom 2020 General Chairs and Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z035/1pVHhDiwaAw", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z027", "title": "Message from the CPSCom 2020 General Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z027/1pVHlEea2Uo", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z044", "title": "Message from the iThings 2020 Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z044/1pVHlp42A6I", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z038", "title": "Message from the IEEE SmartData 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z038/1pVHmlAfmHm", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z045", "title": "Message from the iThings 2020 Steering Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z045/1pVHovdlfgs", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cogmi/2020/4144/0/414400z009", "title": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "doi": null, "abstractUrl": "/proceedings-article/cogmi/2020/414400z009/1qyxQxjJQoU", "parentPublication": { "id": "proceedings/cogmi/2020/4144/0", "title": "2020 IEEE Second International Conference on Cognitive Machine Intelligence (CogMI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600z027", "title": "Message from the ICDM 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600z027/1r54zo9qwco", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qyxP4fUhfG", "title": "2020 IEEE Second International Conference on Cognitive Machine Intelligence (CogMI)", "acronym": "cogmi", "groupId": "1835524", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qyxQxjJQoU", "doi": "10.1109/CogMI50398.2020.00005", "title": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "normalizedTitle": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "abstract": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "abstracts": [ { "abstractType": "Regular", "content": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "fno": "414400z009", "keywords": [], "authors": [], "idPrefix": "cogmi", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2020-10-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2020", "issn": null, "isbn": "978-1-7281-4144-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "414400z005", "articleId": "1qyxQu9jqog", "__typename": "AdjacentArticleType" }, "next": { "fno": "414400z010", "articleId": "1qyxPAJh4Ri", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/edge/2020/8254/0/825400z010", "title": "IEEE 2020 World Congress on Services Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/edge/2020/825400z010/1pDrgVvZ8Pu", "parentPublication": { "id": "proceedings/edge/2020/8254/0", "title": "2020 IEEE International Conference on Edge Computing (EDGE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/edge/2020/8254/0/825400z012", "title": "IEEE 2020 World Congress on Services Message from the Program Chairs in Chief", "doi": null, "abstractUrl": "/proceedings-article/edge/2020/825400z012/1pDrhn0eLMQ", "parentPublication": { "id": "proceedings/edge/2020/8254/0", "title": "2020 IEEE International Conference on Edge Computing (EDGE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edge/2020/8254/0/825400z022", "title": "IEEE International Conference on Edge Computing (EDGE 2020) Message from the Chairs", "doi": null, "abstractUrl": "/proceedings-article/edge/2020/825400z022/1pDrjxMzY08", "parentPublication": { "id": "proceedings/edge/2020/8254/0", "title": "2020 IEEE International Conference on Edge Computing (EDGE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2020/8203/0/820300z022", "title": "IEEE 2020 World Congress on Services Welcome Message from Workshops Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z022/1pK4MPQINIk", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2020/8203/0/820300z015", "title": "IEEE 2020 World Congress on Services Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z015/1pK4N7d5IWY", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2020/8203/0/820300z024", "title": "Message from General Chairs of IEEE AISA 2020", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z024/1pK4NaARi4E", "parentPublication": { "id": 
"proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z040", "title": "Message from the IEEE SmartData 2020 Steering Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z040/1pVHhsxbdMQ", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z039", "title": "Message from the IEEE SmartData 2020 Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z039/1pVHllaLxxS", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z038", "title": "Message from the IEEE SmartData 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z038/1pVHmlAfmHm", "parentPublication": { "id": 
"proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/euc/2020/1118/0/111800z007", "title": "Welcome Messages from IEEE EUC 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/euc/2020/111800z007/1qV6xU6KvXa", "parentPublication": { "id": "proceedings/euc/2020/1118/0", "title": "2020 IEEE 18th International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1r54vmgaSyY", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "acronym": "icdm", "groupId": "1000179", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1r54zo9qwco", "doi": "10.1109/ICDM50108.2020.00005", "title": "Message from the ICDM 2020 General Chairs", "normalizedTitle": "Message from the ICDM 2020 General Chairs", "abstract": "Message from the ICDM 2020 General Chairs", "abstracts": [ { "abstractType": "Regular", "content": "Message from the ICDM 2020 General Chairs", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Message from the ICDM 2020 General Chairs", "fno": "831600z027", "keywords": [], "authors": [], "idPrefix": "icdm", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "27-27", "year": "2020", "issn": null, "isbn": "978-1-7281-8316-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "831600z005", "articleId": "1r54DUnLSPm", "__typename": "AdjacentArticleType" }, "next": { "fno": "831600z028", "articleId": "1r54ynWIDTO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/services/2020/8203/0/820300z015", "title": "IEEE 2020 World Congress on Services Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/services/2020/820300z015/1pK4N7d5IWY", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2020/8203/0/820300z024", "title": "Message from General Chairs of IEEE AISA 2020", "doi": null, "abstractUrl": 
"/proceedings-article/services/2020/820300z024/1pK4NaARi4E", "parentPublication": { "id": "proceedings/services/2020/8203/0", "title": "2020 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2020/8786/0/878600z018", "title": "Welcome Message from Congress 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icws/2020/878600z018/1pLJJbxhsYw", "parentPublication": { "id": "proceedings/icws/2020/8786/0", "title": "2020 IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-seet/2020/7124/0/712400z008", "title": "Message from the ICSE 2020 General co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icse-seet/2020/712400z008/1pVH7f1Cudy", "parentPublication": { "id": "proceedings/icse-seet/2020/7124/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Software Engineering Education and Training (ICSE-SEET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z035", "title": "Message from the GreenCom 2020 General Chairs and Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z035/1pVHhDiwaAw", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z027", "title": 
"Message from the CPSCom 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z027/1pVHlEea2Uo", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z043", "title": "Message from the iThings 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z043/1pVHm7nJFde", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700z038", "title": "Message from the IEEE SmartData 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700z038/1pVHmlAfmHm", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cogmi/2020/4144/0/414400z009", "title": "Message from the IEEE CogMI 2020 General Chairs and PC Chairs CogMI 2020", "doi": null, "abstractUrl": "/proceedings-article/cogmi/2020/414400z009/1qyxQxjJQoU", "parentPublication": { "id": "proceedings/cogmi/2020/4144/0", "title": "2020 IEEE Second International Conference on Cognitive Machine Intelligence (CogMI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2020/9012/0/901200z021", "title": "Message from the ICDM 2020 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2020/901200z021/1rgGo7zlYyI", "parentPublication": { "id": "proceedings/icdmw/2020/9012/0", "title": "2020 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1nkD9vorGs8", "title": "2020 2nd International Conference on Advances in Computer Technology, Information Science and Communications (CTISC)", "acronym": "ctisc", "groupId": "1838024", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1nkDbwZ5WBW", "doi": "10.1109/CTISC49998.2020.00005", "title": "Preface: CTISC 2020", "normalizedTitle": "Preface: CTISC 2020", "abstract": "The 2020 2nd International Conference on Advances in Computer Technology, Information Science and Communications (CTISC 2020) sponsored by the International Association of Applied Science and Engineering, Tokyo University of Science, Japan, and supported by University of Haute-Alsace, France and Kalbis Institute, Indonesia, was successfully held on July 10-12, 2020. CTISC 2020 proceeding centers on collecting the most up-to-date, comprehensive, and worldwide state-of- art knowledge and research in the related fields of Computer Technology, Information Science and Communications. All the accepted papers have been submitted to strict peer review by expert referees and selected based on originality, significance, and clarity for the conference. The proceeding consisting of 27 papers will be published by IEEE CS CPS (Conference Publishing Services).", "abstracts": [ { "abstractType": "Regular", "content": "The 2020 2nd International Conference on Advances in Computer Technology, Information Science and Communications (CTISC 2020) sponsored by the International Association of Applied Science and Engineering, Tokyo University of Science, Japan, and supported by University of Haute-Alsace, France and Kalbis Institute, Indonesia, was successfully held on July 10-12, 2020. CTISC 2020 proceeding centers on collecting the most up-to-date, comprehensive, and worldwide state-of- art knowledge and research in the related fields of Computer Technology, Information Science and Communications. 
All the accepted papers have been submitted to strict peer review by expert referees and selected based on originality, significance, and clarity for the conference. The proceeding consisting of 27 papers will be published by IEEE CS CPS (Conference Publishing Services).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The 2020 2nd International Conference on Advances in Computer Technology, Information Science and Communications (CTISC 2020) sponsored by the International Association of Applied Science and Engineering, Tokyo University of Science, Japan, and supported by University of Haute-Alsace, France and Kalbis Institute, Indonesia, was successfully held on July 10-12, 2020. CTISC 2020 proceeding centers on collecting the most up-to-date, comprehensive, and worldwide state-of- art knowledge and research in the related fields of Computer Technology, Information Science and Communications. All the accepted papers have been submitted to strict peer review by expert referees and selected based on originality, significance, and clarity for the conference. 
The proceeding consisting of 27 papers will be published by IEEE CS CPS (Conference Publishing Services).", "fno": "09203910", "keywords": [], "authors": [ { "affiliation": null, "fullName": "CTISC 2020 Organizing Committee", "givenName": null, "surname": null, "__typename": "ArticleAuthorType" } ], "idPrefix": "ctisc", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "i-i", "year": "2020", "issn": null, "isbn": "978-1-7281-6501-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09203927", "articleId": "1nkDbEnNIOc", "__typename": "AdjacentArticleType" }, "next": { "fno": "09203913", "articleId": "1nkDaCGh0Yw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icedme/2020/8145/0/09122134", "title": "ICEDME 2020 Preface", "doi": null, "abstractUrl": "/proceedings-article/icedme/2020/09122134/1kRSzR4en2U", "parentPublication": { "id": "proceedings/icedme/2020/8145/0", "title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icuems/2020/8832/0/09151524", "title": "Preface: ICUEMS 2020", "doi": null, "abstractUrl": "/proceedings-article/icuems/2020/09151524/1lRlUp2fvXO", "parentPublication": { "id": "proceedings/icuems/2020/8832/0", "title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccia/2020/6042/0/09178668", "title": "Preface: ICCIA 2020", "doi": null, "abstractUrl": "/proceedings-article/iccia/2020/09178668/1mDu48Sqy3K", "parentPublication": { "id": "proceedings/iccia/2020/6042/0", "title": "2020 5th International 
Conference on Computational Intelligence and Applications (ICCIA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbaie/2020/6499/0/09196481", "title": "Preface - ICBAIE 2020", "doi": null, "abstractUrl": "/proceedings-article/icbaie/2020/09196481/1n90W1bpVfO", "parentPublication": { "id": "proceedings/icbaie/2020/6499/0", "title": "2020 International Conference on Big Data, Artificial Intelligence and Internet of Things Engineering (ICBAIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icss/2020/8531/0/853100z003", "title": "2020 International Conference on Service Science ICSS 2020", "doi": null, "abstractUrl": "/proceedings-article/icss/2020/853100z003/1pDrahzLqgw", "parentPublication": { "id": "proceedings/icss/2020/8531/0", "title": "2020 International Conference on Service Science (ICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icss/2020/8531/0/853100z001", "title": "2020 International Conference on Service Science ICSS 2020", "doi": null, "abstractUrl": "/proceedings-article/icss/2020/853100z001/1pDrcUxdG4U", "parentPublication": { "id": "proceedings/icss/2020/8531/0", "title": "2020 International Conference on Service Science (ICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2020/9621/0/962100z018", "title": "FOCS 2020 Preface", "doi": null, "abstractUrl": "/proceedings-article/focs/2020/962100z018/1qyxtd1vJS0", "parentPublication": { "id": "proceedings/focs/2020/9621/0", "title": "2020 IEEE 61st Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isctt/2020/8575/0/857500z019", "title": "Preface", "doi": null, "abstractUrl": "/proceedings-article/isctt/2020/857500z019/1rHeRhUEDJu", "parentPublication": { "id": 
"proceedings/isctt/2020/8575/0", "title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise/2020/2261/0/226100z022", "title": "Preface: icise-ie 2020", "doi": null, "abstractUrl": "/proceedings-article/icise/2020/226100z022/1tnYiHuHZPa", "parentPublication": { "id": "proceedings/icise/2020/2261/0", "title": "2020 International Conference on Information Science and Education (ICISE-IE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icris/2020/1969/0/196900z022", "title": "ICRIS 2020 Preface", "doi": null, "abstractUrl": "/proceedings-article/icris/2020/196900z022/1wG65cFDteg", "parentPublication": { "id": "proceedings/icris/2020/1969/0", "title": "2020 International Conference on Robots & Intelligent System (ICRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNylborE", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "12OmNzC5SEC", "doi": "10.1109/WACV.2018.00117", "title": "Object Detection in Real-Time Systems: Going Beyond Precision", "normalizedTitle": "Object Detection in Real-Time Systems: Going Beyond Precision", "abstract": "Applications like autonomous driving, industrial robotics, surveillance, and wearable assistive technology rely on object detectors as an integral part of the system. Thus, an increase in performance of object detectors directly affects the quality of such systems. In the recent years, convolutional neural networks (CNNs) and its variants emerged as the state of art in object detection, where performance is usually measured either in terms of mean average precision (mAP) or number of frames processed per second (fps). Many applications which use object detectors are resource constrained in practice. Even though it is clear from the published results, that a frame-level analysis of the system in terms of mAP or fps proves the superiority of one algorithm over the other, we observe that such metrics do not necessarily apply to real time applications with resource constraints. A slower algorithm even though highly accurate may need to drop frames to maintain the necessary frame rate and lose on the accuracy. We propose a closer look at the metrics used for performance in real-time applications, and suggest some new evaluation criterion. Our comparison of state of the art detectors on these metrics has also thrown some surprises in terms of conventional wisdom, which we present in this paper. 
Our framework is available at https://www.github.com/anupamsobti/object-detectionreal-time-systems.", "abstracts": [ { "abstractType": "Regular", "content": "Applications like autonomous driving, industrial robotics, surveillance, and wearable assistive technology rely on object detectors as an integral part of the system. Thus, an increase in performance of object detectors directly affects the quality of such systems. In the recent years, convolutional neural networks (CNNs) and its variants emerged as the state of art in object detection, where performance is usually measured either in terms of mean average precision (mAP) or number of frames processed per second (fps). Many applications which use object detectors are resource constrained in practice. Even though it is clear from the published results, that a frame-level analysis of the system in terms of mAP or fps proves the superiority of one algorithm over the other, we observe that such metrics do not necessarily apply to real time applications with resource constraints. A slower algorithm even though highly accurate may need to drop frames to maintain the necessary frame rate and lose on the accuracy. We propose a closer look at the metrics used for performance in real-time applications, and suggest some new evaluation criterion. Our comparison of state of the art detectors on these metrics has also thrown some surprises in terms of conventional wisdom, which we present in this paper. Our framework is available at https://www.github.com/anupamsobti/object-detectionreal-time-systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Applications like autonomous driving, industrial robotics, surveillance, and wearable assistive technology rely on object detectors as an integral part of the system. Thus, an increase in performance of object detectors directly affects the quality of such systems. 
In the recent years, convolutional neural networks (CNNs) and its variants emerged as the state of art in object detection, where performance is usually measured either in terms of mean average precision (mAP) or number of frames processed per second (fps). Many applications which use object detectors are resource constrained in practice. Even though it is clear from the published results, that a frame-level analysis of the system in terms of mAP or fps proves the superiority of one algorithm over the other, we observe that such metrics do not necessarily apply to real time applications with resource constraints. A slower algorithm even though highly accurate may need to drop frames to maintain the necessary frame rate and lose on the accuracy. We propose a closer look at the metrics used for performance in real-time applications, and suggest some new evaluation criterion. Our comparison of state of the art detectors on these metrics has also thrown some surprises in terms of conventional wisdom, which we present in this paper. Our framework is available at https://www.github.com/anupamsobti/object-detectionreal-time-systems.", "fno": "488601b020", "keywords": [ "Computer Vision", "Feedforward Neural Nets", "Object Detection", "Real Time Systems", "Object Detectors", "Frame Level Analysis", "Real Time Applications", "Object Detection", "Real Time Systems", "Wearable Assistive Technology", "Convolutional Neural Networks", "Frame Rate", "Mean Average Precision", "M AP", "Evaluation Criterion", "Detectors", "Measurement", "Object Detection", "Real Time Systems", "Autonomous Vehicles", "Hardware", "Robots" ], "authors": [ { "affiliation": null, "fullName": "Anupam Sobti", "givenName": "Anupam", "surname": "Sobti", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chetan Arora", "givenName": "Chetan", "surname": "Arora", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "M. 
Balakrishnan", "givenName": "M.", "surname": "Balakrishnan", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "1020-1028", "year": "2018", "issn": null, "isbn": "978-1-5386-4886-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "488601b011", "articleId": "12OmNzxgHv8", "__typename": "AdjacentArticleType" }, "next": { "fno": "488601b029", "articleId": "12OmNqIzgVJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "mags/mi/2018/01/mmi2018010031", "title": "Domain-Specific Approximation for Object Detection", "doi": null, "abstractUrl": "/magazine/mi/2018/01/mmi2018010031/13rRUB7a18f", "parentPublication": { "id": "mags/mi", "title": "IEEE Micro", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200d500", "title": "Oriented R-CNN for Object Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200d500/1BmENmkP732", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956710", "title": "Leveraging Synthetic Data in Object Detection on Unmanned Aerial Vehicles", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956710/1IHqyxEQQgg", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a502", "title": "Gaussian YOLOv3: An Accurate and Fast Object Detector 
Using Localization Uncertainty for Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a502/1hVlKGOjr1e", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a573", "title": "A Delay Metric for Video Object Detection: What Average Precision Fails to Tell", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a573/1hVlkkbewGk", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300g717", "title": "ThunderNet: Towards Real-Time Generic Object Detection on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300g717/1hVlxmXwSPu", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093477", "title": "A One-and-Half Stage Pedestrian Detector", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093477/1jPbcGjRhAY", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800k0394", "title": "SaccadeNet: A Fast and Accurate Object Detector", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800k0394/1m3nl3qDagM", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800n3346", "title": "NETNet: Neighbor Erasing and Transferring Network for Better Single Shot Object Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800n3346/1m3o00mSuR2", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900p5227", "title": "The Translucent Patch: A Physical and Universal Attack on Object Detectors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900p5227/1yeJCTAeWty", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hVlKGOjr1e", "doi": "10.1109/ICCV.2019.00059", "title": "Gaussian YOLOv3: An Accurate and Fast Object Detector Using Localization Uncertainty for Autonomous Driving", "normalizedTitle": "Gaussian YOLOv3: An Accurate and Fast Object Detector Using Localization Uncertainty for Autonomous Driving", "abstract": "The use of object detection algorithms is becoming increasingly important in autonomous vehicles, and object detection at high accuracy and a fast inference speed is essential for safe autonomous driving. A false positive (FP) from a false localization during autonomous driving can lead to fatal accidents and hinder safe and efficient driving. Therefore, a detection algorithm that can cope with mislocalizations is required in autonomous driving applications. This paper proposes a method for improving the detection accuracy while supporting a real-time operation by modeling the bounding box (bbox) of YOLOv3, which is the most representative of one-stage detectors, with a Gaussian parameter and redesigning the loss function. In addition, this paper proposes a method for predicting the localization uncertainty that indicates the reliability of bbox. By using the predicted localization uncertainty during the detection process, the proposed schemes can significantly reduce the FP and increase the true positive (TP), thereby improving the accuracy. Compared to a conventional YOLOv3, the proposed algorithm, Gaussian YOLOv3, improves the mean average precision (mAP) by 3.09 and 3.5 on the KITTI and Berkeley deep drive (BDD) datasets, respectively. 
Nevertheless, the proposed algorithm is capable of real-time detection at faster than 42 frames per second (fps) and shows a higher accuracy than previous approaches with a similar fps. Therefore, the proposed algorithm is the most suitable for autonomous driving applications.", "abstracts": [ { "abstractType": "Regular", "content": "The use of object detection algorithms is becoming increasingly important in autonomous vehicles, and object detection at high accuracy and a fast inference speed is essential for safe autonomous driving. A false positive (FP) from a false localization during autonomous driving can lead to fatal accidents and hinder safe and efficient driving. Therefore, a detection algorithm that can cope with mislocalizations is required in autonomous driving applications. This paper proposes a method for improving the detection accuracy while supporting a real-time operation by modeling the bounding box (bbox) of YOLOv3, which is the most representative of one-stage detectors, with a Gaussian parameter and redesigning the loss function. In addition, this paper proposes a method for predicting the localization uncertainty that indicates the reliability of bbox. By using the predicted localization uncertainty during the detection process, the proposed schemes can significantly reduce the FP and increase the true positive (TP), thereby improving the accuracy. Compared to a conventional YOLOv3, the proposed algorithm, Gaussian YOLOv3, improves the mean average precision (mAP) by 3.09 and 3.5 on the KITTI and Berkeley deep drive (BDD) datasets, respectively. Nevertheless, the proposed algorithm is capable of real-time detection at faster than 42 frames per second (fps) and shows a higher accuracy than previous approaches with a similar fps. 
Therefore, the proposed algorithm is the most suitable for autonomous driving applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The use of object detection algorithms is becoming increasingly important in autonomous vehicles, and object detection at high accuracy and a fast inference speed is essential for safe autonomous driving. A false positive (FP) from a false localization during autonomous driving can lead to fatal accidents and hinder safe and efficient driving. Therefore, a detection algorithm that can cope with mislocalizations is required in autonomous driving applications. This paper proposes a method for improving the detection accuracy while supporting a real-time operation by modeling the bounding box (bbox) of YOLOv3, which is the most representative of one-stage detectors, with a Gaussian parameter and redesigning the loss function. In addition, this paper proposes a method for predicting the localization uncertainty that indicates the reliability of bbox. By using the predicted localization uncertainty during the detection process, the proposed schemes can significantly reduce the FP and increase the true positive (TP), thereby improving the accuracy. Compared to a conventional YOLOv3, the proposed algorithm, Gaussian YOLOv3, improves the mean average precision (mAP) by 3.09 and 3.5 on the KITTI and Berkeley deep drive (BDD) datasets, respectively. Nevertheless, the proposed algorithm is capable of real-time detection at faster than 42 frames per second (fps) and shows a higher accuracy than previous approaches with a similar fps. 
Therefore, the proposed algorithm is the most suitable for autonomous driving applications.", "fno": "480300a502", "keywords": [ "Object Detection", "Road Safety", "Traffic Engineering Computing", "Object Detection Algorithms", "Gaussian YOL Ov 3", "Localization Uncertainty", "Gaussian Parameter", "One Stage Detectors", "Bounding Box", "Autonomous Driving Applications", "False Localization", "FP", "Safe Autonomous Driving", "Autonomous Vehicles", "Uncertainty", "Detectors", "Object Detection", "Real Time Systems", "Feature Extraction" ], "authors": [ { "affiliation": "Seoul National University", "fullName": "Jiwoong Choi", "givenName": "Jiwoong", "surname": "Choi", "__typename": "ArticleAuthorType" }, { "affiliation": "Seoul National University", "fullName": "Dayoung Chun", "givenName": "Dayoung", "surname": "Chun", "__typename": "ArticleAuthorType" }, { "affiliation": "Seoul National University of Science and Technology", "fullName": "Hyun Kim", "givenName": "Hyun", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "SNU", "fullName": "Hyuk-Jae Lee", "givenName": "Hyuk-Jae", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "502-511", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "480300a491", "articleId": "1hQqlWfWR0c", "__typename": "AdjacentArticleType" }, "next": { "fno": "480300a512", "articleId": "1hQqspW3v4A", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/candar/2018/9182/0/918200a201", "title": "A Semi-Automatic Video Labeling Tool for Autonomous Driving Based on Multi-Object Detector and Tracker", "doi": null, "abstractUrl": 
"/proceedings-article/candar/2018/918200a201/17D45XeKgrU", "parentPublication": { "id": "proceedings/candar/2018/9182/0", "title": "2018 Sixth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cacml/2022/8290/0/829000a462", "title": "H-YoLov3: High performance object detection applied to assisted driving", "doi": null, "abstractUrl": "/proceedings-article/cacml/2022/829000a462/1FY1f50A3eg", "parentPublication": { "id": "proceedings/cacml/2022/8290/0", "title": "2022 Asia Conference on Algorithms, Computing and Machine Learning (CACML)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2019/03/08817317", "title": "Multimedia for Autonomous Driving", "doi": null, "abstractUrl": "/magazine/mu/2019/03/08817317/1cPWP7sFJsI", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b836", "title": "Joint 3D Instance Segmentation and Object Detection for Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b836/1m3nhp3wVOM", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700a654", "title": "Research on a road target detection method based on improved YOLOv3", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700a654/1pVHhJUY8FO", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and 
Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtss/2020/8324/0/832400a191", "title": "R-TOD: Real-Time Object Detector with Minimized End-to-End Delay for Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/rtss/2020/832400a191/1rqEORhIFZ6", "parentPublication": { "id": "proceedings/rtss/2020/8324/0", "title": "2020 IEEE Real-Time Systems Symposium (RTSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2021/1219/0/121900a117", "title": "Testing Object Detection for Autonomous Driving Systems via 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2021/121900a117/1sET68WDvWg", "parentPublication": { "id": "proceedings/icse-companion/2021/1219/0/", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412687", "title": "Compression of YOLOv3 via Block-Wise and Channel-Wise Pruning for Real-Time and Complicated Autonomous Driving Environment Sensing Applications", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412687/1tmhCgxqZnW", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoias/2021/4195/0/419500a039", "title": "Research on Traffic Target Detection Method Based on Improved YOLOv3", "doi": null, "abstractUrl": "/proceedings-article/icoias/2021/419500a039/1wG6bpKd0wE", "parentPublication": { "id": "proceedings/icoias/2021/4195/0", "title": "2021 4th 
International Conference on Intelligent Autonomous Systems (ICoIAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaa/2021/3730/0/373000a542", "title": "An Improved Yolov3 Object Detection Algorithm for UAV Aerial Images", "doi": null, "abstractUrl": "/proceedings-article/icaa/2021/373000a542/1zL1RfZOGly", "parentPublication": { "id": "proceedings/icaa/2021/3730/0", "title": "2021 International Conference on Intelligent Computing, Automation and Applications (ICAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3o75on8VG", "doi": "10.1109/CVPR42600.2020.01373", "title": "Physically Realizable Adversarial Examples for LiDAR Object Detection", "normalizedTitle": "Physically Realizable Adversarial Examples for LiDAR Object Detection", "abstract": "Modern autonomous driving systems rely heavily on deep learning models to process point cloud sensory data; meanwhile, deep models have been shown to be susceptible to adversarial attacks with visually imperceptible perturbations. Despite the fact that this poses a security concern for the self-driving industry, there has been very little exploration in terms of 3D perception, as most adversarial attacks have only been applied to 2D flat images. In this paper, we address this issue and present a method to generate universal 3D adversarial objects to fool LiDAR detectors. In particular, we demonstrate that placing an adversarial object on the rooftop of any target vehicle to hide the vehicle entirely from LiDAR detectors with a success rate of 80%. We report attack results on a suite of detectors using various input representation of point clouds. We also conduct a pilot study on adversarial defense using data augmentation. This is one step closer towards safer self-driving under unseen conditions from limited training data.", "abstracts": [ { "abstractType": "Regular", "content": "Modern autonomous driving systems rely heavily on deep learning models to process point cloud sensory data; meanwhile, deep models have been shown to be susceptible to adversarial attacks with visually imperceptible perturbations. 
Despite the fact that this poses a security concern for the self-driving industry, there has been very little exploration in terms of 3D perception, as most adversarial attacks have only been applied to 2D flat images. In this paper, we address this issue and present a method to generate universal 3D adversarial objects to fool LiDAR detectors. In particular, we demonstrate that placing an adversarial object on the rooftop of any target vehicle to hide the vehicle entirely from LiDAR detectors with a success rate of 80%. We report attack results on a suite of detectors using various input representation of point clouds. We also conduct a pilot study on adversarial defense using data augmentation. This is one step closer towards safer self-driving under unseen conditions from limited training data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Modern autonomous driving systems rely heavily on deep learning models to process point cloud sensory data; meanwhile, deep models have been shown to be susceptible to adversarial attacks with visually imperceptible perturbations. Despite the fact that this poses a security concern for the self-driving industry, there has been very little exploration in terms of 3D perception, as most adversarial attacks have only been applied to 2D flat images. In this paper, we address this issue and present a method to generate universal 3D adversarial objects to fool LiDAR detectors. In particular, we demonstrate that placing an adversarial object on the rooftop of any target vehicle to hide the vehicle entirely from LiDAR detectors with a success rate of 80%. We report attack results on a suite of detectors using various input representation of point clouds. We also conduct a pilot study on adversarial defense using data augmentation. 
This is one step closer towards safer self-driving under unseen conditions from limited training data.", "fno": "716800n3713", "keywords": [ "Computer Vision", "Image Classification", "Image Sensors", "Learning Artificial Intelligence", "Object Detection", "Optical Radar", "Traffic Engineering Computing", "Adversarial Defense", "Data Augmentation", "Safer Self Driving", "Physically Realizable Adversarial Examples", "Li DAR Object Detection", "Modern Autonomous Driving Systems", "Deep Learning Models", "Point Cloud Sensory Data", "Deep Models", "Adversarial Attacks", "Visually Imperceptible Perturbations", "Security Concern", "2 D Flat Images", "Universal 3 D Adversarial Objects", "Li DAR Detectors", "Adversarial Object", "Attack Results", "Point Clouds", "Three Dimensional Displays", "Laser Radar", "Detectors", "Autonomous Vehicles", "Solid Modeling", "Laser Theory" ], "authors": [ { "affiliation": "Uber ATG", "fullName": "James Tu", "givenName": "James", "surname": "Tu", "__typename": "ArticleAuthorType" }, { "affiliation": "Uber ATG; University of Toronto", "fullName": "Mengye Ren", "givenName": "Mengye", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": "Uber ATG; University of Toronto", "fullName": "Sivabalan Manivasagam", "givenName": "Sivabalan", "surname": "Manivasagam", "__typename": "ArticleAuthorType" }, { "affiliation": "Uber ATG", "fullName": "Ming Liang", "givenName": "Ming", "surname": "Liang", "__typename": "ArticleAuthorType" }, { "affiliation": "Uber ATG; University of Toronto", "fullName": "Bin Yang", "givenName": "Bin", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Princeton University", "fullName": "Richard Du", "givenName": "Richard", "surname": "Du", "__typename": "ArticleAuthorType" }, { "affiliation": "Uber ATG; University of Toronto", "fullName": "Frank Cheng", "givenName": "Frank", "surname": "Cheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Uber ATG; University of Toronto", 
"fullName": "Raquel Urtasun", "givenName": "Raquel", "surname": "Urtasun", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "13713-13722", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800n3703", "articleId": "1m3nH7oHLvW", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800n3723", "articleId": "1m3nzqChdJe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000f870", "title": "LiDAR-Video Driving Dataset: Learning Driving Policies Effectively", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f870/17D45WXIkzk", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200h878", "title": "Fooling LiDAR Perception via Adversarial Trajectory Perturbation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200h878/1BmKjS69hrq", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2019/0990/0/08909897", "title": "Multi-Threshold based Ground Detection for Point Cloud Scene", "doi": null, "abstractUrl": "/proceedings-article/avss/2019/08909897/1febN0D1yCs", "parentPublication": { "id": "proceedings/avss/2019/0990/0", "title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300c422", "title": "On Control Transitions in Autonomous Driving: A Framework and Analysis for Characterizing Scene Complexity", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300c422/1i5mAMqcXwk", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci*cc/2019/1419/0/09146070", "title": "Towards Computationally-Efficient Cognitive Sensor Systems for Autonomous Vehicles", "doi": null, "abstractUrl": "/proceedings-article/icci*cc/2019/09146070/1lFJei5G9Wg", "parentPublication": { "id": "proceedings/icci*cc/2019/1419/0", "title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b689", "title": "Depth Sensing Beyond LiDAR Range", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b689/1m3o7FHToFq", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsd/2020/9535/0/09217855", "title": "Enabling Fail-Operational Behavior and Degradation for Safety-Critical Automotive 3D Flash LiDAR Systems", "doi": null, "abstractUrl": "/proceedings-article/dsd/2020/09217855/1nLbJw0X1JK", "parentPublication": { "id": "proceedings/dsd/2020/9535/0", "title": "2020 23rd Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a753", "title": "PanoNet3D: Combining Semantic and 
Geometric Understanding for LiDAR Point Cloud Detection", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a753/1qyxjGFgneM", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h542", "title": "LiDAR R-CNN: An Efficient and Universal 3D Object Detector", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h542/1yeIgfBLnI4", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900f721", "title": "RSN: Range Sparse Net for Efficient, Accurate LiDAR 3D Object Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900f721/1yeLfjmzCy4", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1mA9UgUshri", "title": "2020 IEEE International Conference On Artificial Intelligence Testing (AITest)", "acronym": "aitest", "groupId": "1831724", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1mA9ULGvTMY", "doi": "10.1109/AITEST49225.2020.00011", "title": "Coverage based testing for V&#x0026;V and Safety Assurance of Self-driving Autonomous Vehicles: A Systematic Literature Review", "normalizedTitle": "Coverage based testing for V&V and Safety Assurance of Self-driving Autonomous Vehicles: A Systematic Literature Review", "abstract": "Self-driving Autonomous Vehicles (SAVs) are gaining more interest each passing day by the industry as well as the general public. Tech and automobile companies are investing huge amounts of capital in research and development of SAVs to make sure they have a head start in the SAV market in the future. One of the major hurdles in the way of SAVs making it to the public roads is the lack of confidence of public in the safety aspect of SAVs. In order to assure safety and provide confidence to the public in the safety of SAVs, researchers around the world have used coverage-based testing for Verification and Validation (V&amp;V) and safety assurance of SAVs. The objective of this paper is to investigate the coverage criteria proposed and coverage maximizing techniques used by researchers in the last decade up till now, to assure safety of SAVs. We conduct a Systematic Literature Review (SLR) for this investigation in our paper. We present a classification of existing research based on the coverage criteria used. Several research gaps and research directions are also provided in this SLR to enable further research in this domain. This paper provides a body of knowledge in the domain of safety assurance of SAVs. 
We believe the results of this SLR will be helpful in the progression of V&amp;V and safety assurance of SAVs.", "abstracts": [ { "abstractType": "Regular", "content": "Self-driving Autonomous Vehicles (SAVs) are gaining more interest each passing day by the industry as well as the general public. Tech and automobile companies are investing huge amounts of capital in research and development of SAVs to make sure they have a head start in the SAV market in the future. One of the major hurdles in the way of SAVs making it to the public roads is the lack of confidence of public in the safety aspect of SAVs. In order to assure safety and provide confidence to the public in the safety of SAVs, researchers around the world have used coverage-based testing for Verification and Validation (V&amp;V) and safety assurance of SAVs. The objective of this paper is to investigate the coverage criteria proposed and coverage maximizing techniques used by researchers in the last decade up till now, to assure safety of SAVs. We conduct a Systematic Literature Review (SLR) for this investigation in our paper. We present a classification of existing research based on the coverage criteria used. Several research gaps and research directions are also provided in this SLR to enable further research in this domain. This paper provides a body of knowledge in the domain of safety assurance of SAVs. We believe the results of this SLR will be helpful in the progression of V&amp;V and safety assurance of SAVs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Self-driving Autonomous Vehicles (SAVs) are gaining more interest each passing day by the industry as well as the general public. Tech and automobile companies are investing huge amounts of capital in research and development of SAVs to make sure they have a head start in the SAV market in the future. 
One of the major hurdles in the way of SAVs making it to the public roads is the lack of confidence of public in the safety aspect of SAVs. In order to assure safety and provide confidence to the public in the safety of SAVs, researchers around the world have used coverage-based testing for Verification and Validation (V&V) and safety assurance of SAVs. The objective of this paper is to investigate the coverage criteria proposed and coverage maximizing techniques used by researchers in the last decade up till now, to assure safety of SAVs. We conduct a Systematic Literature Review (SLR) for this investigation in our paper. We present a classification of existing research based on the coverage criteria used. Several research gaps and research directions are also provided in this SLR to enable further research in this domain. This paper provides a body of knowledge in the domain of safety assurance of SAVs. We believe the results of this SLR will be helpful in the progression of V&V and safety assurance of SAVs.", "fno": "09176839", "keywords": [ "Automobiles", "Formal Verification", "Intelligent Transportation Systems", "Mobile Robots", "Program Testing", "Road Safety", "Safety Critical Software", "Coverage Based Testing", "Verification And Validation", "Self Driving Autonomous Vehicles", "Automobile Companies", "SAV Safety Assurance", "Safety", "Testing", "Autonomous Vehicles", "Automobiles", "Autonomous Automobiles", "Accidents", "Safety Assurance", "V X 0026 V", "Self Driving Cars", "Autonomous Vehicles", "Coverage Criteria" ], "authors": [ { "affiliation": "University of York,Department of Computer Science,York,UK,YO105GH", "fullName": "Zaid Tahir", "givenName": "Zaid", "surname": "Tahir", "__typename": "ArticleAuthorType" }, { "affiliation": "University of York,Department of Computer Science,York,UK,YO105GH", "fullName": "Rob Alexander", "givenName": "Rob", "surname": "Alexander", "__typename": "ArticleAuthorType" } ], "idPrefix": "aitest", "isOpenAccess": 
false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-08-01T00:00:00", "pubType": "proceedings", "pages": "23-30", "year": "2020", "issn": null, "isbn": "978-1-7281-6984-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09176830", "articleId": "1mA9Weczcsw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09176833", "articleId": "1mA9Wn65ObS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isorc/2018/5847/0/584701a130", "title": "Intrusion-Tolerant Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/isorc/2018/584701a130/12OmNARAndt", "parentPublication": { "id": "proceedings/isorc/2018/5847/0", "title": "2018 IEEE 21st International Symposium on Real-Time Distributed Computing (ISORC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issrew/2017/2387/0/2387a032", "title": "IV&V Case: Empirical Study of Software Independent Verification and Validation Based on Safety Case", "doi": null, "abstractUrl": "/proceedings-article/issrew/2017/2387a032/12OmNC0PGMv", "parentPublication": { "id": "proceedings/issrew/2017/2387/0", "title": "2017 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issrew/2016/3601/0/3601a249", "title": "Safety Assurance for Emergent Collaboration of Open Distributed Systems", "doi": null, "abstractUrl": "/proceedings-article/issrew/2016/3601a249/12OmNqJZgMS", "parentPublication": { "id": "proceedings/issrew/2016/3601/0", "title": "2016 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icst/2013/4968/0/4968a094", "title": "Classification, Structuring, and Assessment of Evidence for Safety -- A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/icst/2013/4968a094/12OmNylboyb", "parentPublication": { "id": "proceedings/icst/2013/4968/0", "title": "2013 IEEE Sixth International Conference on Software Testing, Verification and Validation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2022/1647/0/09767514", "title": "Demo: Distracted Driving Detection", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2022/09767514/1Df87KUPDqM", "parentPublication": { "id": "proceedings/percom-workshops/2022/1647/0", "title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tase/2019/3342/0/334200a128", "title": "A Quantitative Safety Verification Approach for the Decision-making Process of Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/tase/2019/334200a128/1fpNButzfBm", "parentPublication": { "id": "proceedings/tase/2019/3342/0", "title": "2019 International Symposium on Theoretical Aspects of Software Engineering (TASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2020/5778/0/09159061", "title": "Generating Avoidable Collision Scenarios for Testing Autonomous Driving Systems", "doi": null, "abstractUrl": "/proceedings-article/icst/2020/09159061/1m3oPFVKuZ2", "parentPublication": { "id": "proceedings/icst/2020/5778/0", "title": "2020 IEEE 13th International Conference on Software Testing, Validation and Verification (ICST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2021/10/09172045", "title": "Guardauto: A 
Decentralized Runtime Protection System for Autonomous Driving", "doi": null, "abstractUrl": "/journal/tc/2021/10/09172045/1mrN5uXI7zq", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/2020/4146/0/414600z018", "title": "Industry Plenary Panel: Autonomous Driving: Practical Challenges &#x0026; Opportunities", "doi": null, "abstractUrl": "/proceedings-article/cic/2020/414600z018/1qyxLarn344", "parentPublication": { "id": "proceedings/cic/2020/4146/0", "title": "2020 IEEE 6th International Conference on Collaboration and Internet Computing (CIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2021/08/09504681", "title": "Safety, Complexity, and Automated Driving: Holistic Perspectives on Safety Assurance", "doi": null, "abstractUrl": "/magazine/co/2021/08/09504681/1vLAu4VeqHe", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeJCTAeWty", "doi": "10.1109/CVPR46437.2021.01498", "title": "The Translucent Patch: A Physical and Universal Attack on Object Detectors", "normalizedTitle": "The Translucent Patch: A Physical and Universal Attack on Object Detectors", "abstract": "Physical adversarial attacks against object detectors have seen increasing success in recent years. However, these attacks require direct access to the object of interest in order to apply a physical patch. Furthermore, to hide multiple objects, an adversarial patch must be applied to each object. In this paper, we propose a contactless translucent physical patch containing a carefully constructed pattern, which is placed on the camera&#x2019;s lens, to fool state-of-the-art object detectors. The primary goal of our patch is to hide all instances of a selected target class. In addition, the optimization method used to construct the patch aims to ensure that the detection of other (untargeted) classes remains unharmed. Therefore, in our experiments, which are conducted on state-of-the-art object detection models used in autonomous driving, we study the effect of the patch on the detection of both the selected target class and the other classes. We show that our patch was able to prevent the detection of 42.27% of all stop sign instances while maintaining high (nearly 80%) detection of the other classes.", "abstracts": [ { "abstractType": "Regular", "content": "Physical adversarial attacks against object detectors have seen increasing success in recent years. However, these attacks require direct access to the object of interest in order to apply a physical patch. 
Furthermore, to hide multiple objects, an adversarial patch must be applied to each object. In this paper, we propose a contactless translucent physical patch containing a carefully constructed pattern, which is placed on the camera&#x2019;s lens, to fool state-of-the-art object detectors. The primary goal of our patch is to hide all instances of a selected target class. In addition, the optimization method used to construct the patch aims to ensure that the detection of other (untargeted) classes remains unharmed. Therefore, in our experiments, which are conducted on state-of-the-art object detection models used in autonomous driving, we study the effect of the patch on the detection of both the selected target class and the other classes. We show that our patch was able to prevent the detection of 42.27% of all stop sign instances while maintaining high (nearly 80%) detection of the other classes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Physical adversarial attacks against object detectors have seen increasing success in recent years. However, these attacks require direct access to the object of interest in order to apply a physical patch. Furthermore, to hide multiple objects, an adversarial patch must be applied to each object. In this paper, we propose a contactless translucent physical patch containing a carefully constructed pattern, which is placed on the camera’s lens, to fool state-of-the-art object detectors. The primary goal of our patch is to hide all instances of a selected target class. In addition, the optimization method used to construct the patch aims to ensure that the detection of other (untargeted) classes remains unharmed. Therefore, in our experiments, which are conducted on state-of-the-art object detection models used in autonomous driving, we study the effect of the patch on the detection of both the selected target class and the other classes. 
We show that our patch was able to prevent the detection of 42.27% of all stop sign instances while maintaining high (nearly 80%) detection of the other classes.", "fno": "450900p5227", "keywords": [ "Object Detection", "Optimisation", "Object Detection Models", "Selected Target Class", "State Of The Art Object Detectors", "Carefully Constructed Pattern", "Contactless Translucent Physical Patch", "Adversarial Patch", "Multiple Objects", "Physical Adversarial Attacks", "Translucent Patch", "Computer Vision", "Face Recognition", "Optimization Methods", "Detectors", "Object Detection", "Cameras", "Autonomous Vehicles" ], "authors": [ { "affiliation": "Ben-Gurion University of the Negev,Department of Software and Information Systems Engineering", "fullName": "Alon Zolfi", "givenName": "Alon", "surname": "Zolfi", "__typename": "ArticleAuthorType" }, { "affiliation": "Ben-Gurion University of the Negev,Department of Software and Information Systems Engineering", "fullName": "Moshe Kravchik", "givenName": "Moshe", "surname": "Kravchik", "__typename": "ArticleAuthorType" }, { "affiliation": "Ben-Gurion University of the Negev,Department of Software and Information Systems Engineering", "fullName": "Yuval Elovici", "givenName": "Yuval", "surname": "Elovici", "__typename": "ArticleAuthorType" }, { "affiliation": "Ben-Gurion University of the Negev,Department of Software and Information Systems Engineering", "fullName": "Asaf Shabtai", "givenName": "Asaf", "surname": "Shabtai", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "15227-15236", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "450900p5217", "articleId": "1yeJUlBIMLe", "__typename": "AdjacentArticleType" }, "next": 
{ "fno": "450900p5237", "articleId": "1yeIdhkpb9e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032d420", "title": "Incremental Learning of Object Detectors without Catastrophic Forgetting", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d420/12OmNBLdKMW", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459352", "title": "Patch-based within-object classification", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459352/12OmNvTTc7a", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/09749837", "title": "Deeply Unsupervised Patch Re-Identification for Pre-training Object Detectors", "doi": null, "abstractUrl": "/journal/tp/5555/01/09749837/1CkdRsuJV5K", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600o4953", "title": "Segment and Complete: Defending Object Detectors against Adversarial Patch Attacks with Robust Patch Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600o4953/1H1nh1Xh7S8", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2022/7480/0/748000a126", "title": "An Asterisk-shaped Patch Attack for Object Detection", "doi": null, 
"abstractUrl": "/proceedings-article/dsc/2022/748000a126/1H44pOmCESs", "parentPublication": { "id": "proceedings/dsc/2022/7480/0", "title": "2022 7th IEEE International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2023/9336/0/933600b366", "title": "ObjectSeeker: Certifiably Robust Object Detection against Patch Hiding Attacks via Patch-agnostic Masking", "doi": null, "abstractUrl": "/proceedings-article/sp/2023/933600b366/1Js0DTKQIBW", "parentPublication": { "id": "proceedings/sp/2023/9336/0/", "title": "2023 IEEE Symposium on Security and Privacy (SP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2022/7729/0/10092200", "title": "Detecting Physical Adversarial Patch Attacks with Object Detectors", "doi": null, "abstractUrl": "/proceedings-article/aipr/2022/10092200/1MepJuFu7WU", "parentPublication": { "id": "proceedings/aipr/2022/7729/0", "title": "2022 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a049", "title": "Fooling Automated Surveillance Cameras: Adversarial Patches to Attack Person Detection", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a049/1iTvlxAH5qo", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428443", "title": "Rpattack: Refined Patch Attack on General Object Detectors", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428443/1uim8hgJEVq", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/spw/2021/3732/0/373200a243", "title": "Demo: Security of Camera-based Perception for Autonomous Driving under Adversarial Attack", "doi": null, "abstractUrl": "/proceedings-article/spw/2021/373200a243/1v56n9UUvh6", "parentPublication": { "id": "proceedings/spw/2021/3732/0", "title": "2021 IEEE Security and Privacy Workshops (SPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KxTT0RGtCo", "title": "2022 IEEE International Conference on Knowledge Graph (ICKG)", "acronym": "ickg", "groupId": "1821544", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KxU0VArAty", "doi": "10.1109/ICKG55886.2022.00032", "title": "Unsupervised DeepView: Global Explainability of Uncertainties for High Dimensional Data", "normalizedTitle": "Unsupervised DeepView: Global Explainability of Uncertainties for High Dimensional Data", "abstract": "In recent years, more and more visualization methods for explanations of artificial intelligence have been proposed that focus on untangling black box models for single instances of the data set. While the focus often lies on supervised learning algorithms, the study of uncertainty estimations in the unsupervised domain for high-dimensional data sets in the explainability domain has been neglected so far. As a result, existing visualization methods struggle to visualize global uncertainty patterns over whole datasets. We propose Unsupervised DeepView, the first global uncertainty visualization method for high dimensional data based on a novel unsupervised proxy for local uncertainties. In this paper, we exploit the mathematical notion of local intrinsic dimensionality as a measure of local data complexity. As a label-agnostic measure of model uncertainty in unsupervised machine learning, it shows two highly desirable features: It can be used for global structure visualization as well as for the detection of local adversarials. In our empirical evaluation, we demonstrate its ability both in visualizations and quantitative analysis for unsupervised models on multiple datasets.", "abstracts": [ { "abstractType": "Regular", "content": "In recent years, more and more visualization methods for explanations of artificial intelligence have been proposed that focus on untangling black box models for single instances of the data set. 
While the focus often lies on supervised learning algorithms, the study of uncertainty estimations in the unsupervised domain for high-dimensional data sets in the explainability domain has been neglected so far. As a result, existing visualization methods struggle to visualize global uncertainty patterns over whole datasets. We propose Unsupervised DeepView, the first global uncertainty visualization method for high dimensional data based on a novel unsupervised proxy for local uncertainties. In this paper, we exploit the mathematical notion of local intrinsic dimensionality as a measure of local data complexity. As a label-agnostic measure of model uncertainty in unsupervised machine learning, it shows two highly desirable features: It can be used for global structure visualization as well as for the detection of local adversarials. In our empirical evaluation, we demonstrate its ability both in visualizations and quantitative analysis for unsupervised models on multiple datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recent years, more and more visualization methods for explanations of artificial intelligence have been proposed that focus on untangling black box models for single instances of the data set. While the focus often lies on supervised learning algorithms, the study of uncertainty estimations in the unsupervised domain for high-dimensional data sets in the explainability domain has been neglected so far. As a result, existing visualization methods struggle to visualize global uncertainty patterns over whole datasets. We propose Unsupervised DeepView, the first global uncertainty visualization method for high dimensional data based on a novel unsupervised proxy for local uncertainties. In this paper, we exploit the mathematical notion of local intrinsic dimensionality as a measure of local data complexity. 
As a label-agnostic measure of model uncertainty in unsupervised machine learning, it shows two highly desirable features: It can be used for global structure visualization as well as for the detection of local adversarials. In our empirical evaluation, we demonstrate its ability both in visualizations and quantitative analysis for unsupervised models on multiple datasets.", "fno": "510100a196", "keywords": [ "Artificial Intelligence", "Data Visualisation", "Learning Artificial Intelligence", "Supervised Learning", "Unsupervised Learning", "Artificial Intelligence", "Explainability Domain", "Global Structure Visualization", "Global Uncertainty Patterns", "Global Uncertainty Visualization Method", "High Dimensional Data", "High Dimensional Data Sets", "Highly Desirable Features", "Local Data Complexity", "Local Intrinsic Dimensionality", "Local Uncertainties", "Model Uncertainty", "Novel Unsupervised Proxy", "Single Instances", "Supervised Learning Algorithms", "Uncertainty Estimations", "Unsupervised Deep View", "Unsupervised Domain", "Unsupervised Machine Learning", "Unsupervised Models", "Untangling Black Box Models", "Visualization Methods Struggle", "Uncertainty", "Statistical Analysis", "Supervised Learning", "Measurement Uncertainty", "Data Visualization", "Estimation", "Machine Learning", "Visualization", "Unsupervised Learning", "Uncertainty Quantification", "Adversarials" ], "authors": [ { "affiliation": "Research Center Trustworthy Data Science and Security, TU Dortmund", "fullName": "Carina Newen", "givenName": "Carina", "surname": "Newen", "__typename": "ArticleAuthorType" }, { "affiliation": "Research Center Trustworthy Data Science and Security, TU Dortmund", "fullName": "Emmanuel Müller", "givenName": "Emmanuel", "surname": "Müller", "__typename": "ArticleAuthorType" } ], "idPrefix": "ickg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-11-01T00:00:00", "pubType": "proceedings", "pages": 
"196-202", "year": "2022", "issn": null, "isbn": "978-1-6654-5101-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "510100a188", "articleId": "1KxU3LuDs8o", "__typename": "AdjacentArticleType" }, "next": { "fno": "510100a203", "articleId": "1KxTXyo4IVy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2010/4256/0/4256a304", "title": "Exploiting Local Data Uncertainty to Boost Global Outlier Detection", "doi": null, "abstractUrl": "/proceedings-article/icdm/2010/4256a304/12OmNBSSVbO", "parentPublication": { "id": "proceedings/icdm/2010/4256/0", "title": "2010 IEEE International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2013/5108/0/5108a131", "title": "Local and Global Discriminative Learning for Unsupervised Feature Selection", "doi": null, "abstractUrl": "/proceedings-article/icdm/2013/5108a131/12OmNx2QUFS", "parentPublication": { "id": "proceedings/icdm/2013/5108/0", "title": "2013 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2021/2398/0/239800a699", "title": "Global Convolutional Neural Processes", "doi": null, "abstractUrl": "/proceedings-article/icdm/2021/239800a699/1AqxhsHe0mI", "parentPublication": { "id": "proceedings/icdm/2021/2398/0", "title": "2021 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500b555", "title": "Uncertainty Learning towards Unsupervised Deformable Medical Image Registration", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500b555/1B13zaPGj7y", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter 
Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2022/04/09714847", "title": "Enhanced Local and Global Learning for Rotation-Invariant Point Cloud Representation", "doi": null, "abstractUrl": "/magazine/mu/2022/04/09714847/1B2CYdfkBnG", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859983", "title": "Learning Multiple Granularity Features for Unsupervised Person Re-Identification", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859983/1G9ENsraaju", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2022/4609/0/460900a626", "title": "Unsupervised DeepView: Global Uncertainty Visualization for High Dimensional Data", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2022/460900a626/1KBr5pVl2qA", "parentPublication": { "id": "proceedings/icdmw/2022/4609/0", "title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isctt/2020/8575/0/857500a482", "title": "High-Performance Blind Spectrum Sensing: An Iterative Unsupervised Learning Approach", "doi": null, "abstractUrl": "/proceedings-article/isctt/2020/857500a482/1rHeOnuAhP2", "parentPublication": { "id": "proceedings/isctt/2020/8575/0", "title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428094", "title": "Deep Unsupervised Hashing by Global and Local Consistency", 
"doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428094/1uilWCcJo08", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a314", "title": "Fine and Coarse-Grained Feature Learning for Unsupervised Person Re-identification", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a314/1xqyIze05qw", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvlPkDu", "title": "2015 8th International Conference on Control and Automation (CA)", "acronym": "ca", "groupId": "1805764", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNx4yvFA", "doi": "10.1109/CA.2015.19", "title": "A Novel Dynamic Surface Control Based on State Observer", "normalizedTitle": "A Novel Dynamic Surface Control Based on State Observer", "abstract": "The traditional surface control method is developed in this paper which is based on the state observer on line and this method is applied in saturated water-steam temperature system. Certain first-order low-pass filters are introduced into the designing process to avoid the occurrence of high-order derivatives of elements in the system which makes it easy to implement in practical applications. The stabilization of this control method is verified. Simulation results show that the dynamic surface control method still ensures an accurate result, even if the loads change in a great and parameters of the controlled plant change significantly.", "abstracts": [ { "abstractType": "Regular", "content": "The traditional surface control method is developed in this paper which is based on the state observer on line and this method is applied in saturated water-steam temperature system. Certain first-order low-pass filters are introduced into the designing process to avoid the occurrence of high-order derivatives of elements in the system which makes it easy to implement in practical applications. The stabilization of this control method is verified. 
Simulation results show that the dynamic surface control method still ensures an accurate result, even if the loads change in a great and parameters of the controlled plant change significantly.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The traditional surface control method is developed in this paper which is based on the state observer on line and this method is applied in saturated water-steam temperature system. Certain first-order low-pass filters are introduced into the designing process to avoid the occurrence of high-order derivatives of elements in the system which makes it easy to implement in practical applications. The stabilization of this control method is verified. Simulation results show that the dynamic surface control method still ensures an accurate result, even if the loads change in a great and parameters of the controlled plant change significantly.", "fno": "9857a038", "keywords": [ "Low Pass Filters", "Observers", "Stability", "Dynamic Surface Control", "State Observer", "Surface Control Method", "Saturated Water Steam Temperature System", "First Order Low Pass Filters", "High Order Derivatives", "Stabilization", "Controlled Plant Change", "Temperature Control", "Surface Treatment", "Process Control", "Observers", "Three Dimensional Displays", "PD Control", "Dynamic Surface Control", "Third Order Dynamics", "One Order Filter" ], "authors": [ { "affiliation": "Coll. of Inf. Sci. & Eng., Northeastern Univ., Shenyang, China", "fullName": "Yu Hongxia", "givenName": "Yu", "surname": "Hongxia", "__typename": "ArticleAuthorType" }, { "affiliation": "Coll. of Inf. Sci. & Eng., Northeastern Univ., Shenyang, China", "fullName": "Yuanwei Jing", "givenName": "Yuanwei", "surname": "Jing", "__typename": "ArticleAuthorType" }, { "affiliation": "Coll. of Inf. Sci. 
& Eng., Northeastern Univ., Shenyang, China", "fullName": "Zhang Siying", "givenName": "Zhang", "surname": "Siying", "__typename": "ArticleAuthorType" } ], "idPrefix": "ca", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-11-01T00:00:00", "pubType": "proceedings", "pages": "38-41", "year": "2015", "issn": null, "isbn": "978-1-5090-0397-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9857a033", "articleId": "12OmNBPtJB2", "__typename": "AdjacentArticleType" }, "next": { "fno": "9857a042", "articleId": "12OmNCyTyqG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b735", "title": "Surface Normal Reconstruction from Specular Information in Light Field Data", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b735/12OmNAP1YZr", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2016/4400/0/4400a054", "title": "Image Representation of Rational Surface Based on Tensor Product Form", "doi": null, "abstractUrl": "/proceedings-article/icdh/2016/4400a054/12OmNApu5Dv", "parentPublication": { "id": "proceedings/icdh/2016/4400/0", "title": "2016 6th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fskd/2009/3735/4/3735d216", "title": "A Control Method of Optimized Tendency Surface and its Application", "doi": null, "abstractUrl": "/proceedings-article/fskd/2009/3735d216/12OmNBeRtR9", "parentPublication": { "id": "proceedings/fskd/2009/3735/4", "title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference 
on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2014/4261/0/4261a497", "title": "Five-Axis Numerical Control Processing Application and Surface Quality Control of HyperMILL-Based Mobile Phone Mold Cavity", "doi": null, "abstractUrl": "/proceedings-article/isdea/2014/4261a497/12OmNCmGNUb", "parentPublication": { "id": "proceedings/isdea/2014/4261/0", "title": "2014 Fifth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imccc/2016/1195/0/07774915", "title": "Double Power Nonsingular Sliding Mode Guidance Law Based on Super-Twisting Disturbance Observer", "doi": null, "abstractUrl": "/proceedings-article/imccc/2016/07774915/12OmNyk2ZYo", "parentPublication": { "id": "proceedings/imccc/2016/1195/0", "title": "2016 Sixth International Conference on Instrumentation & Measurement, Computer, Communication and Control (IMCCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032f400", "title": "Intrinsic 3D Dynamic Surface Tracking based on Dynamic Ricci Flow and Teichm&#xfc;ller Map", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032f400/12OmNyv7mb8", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/05/06605689", "title": "Geodesic Mapping for Dynamic Surface Alignment", "doi": null, "abstractUrl": "/journal/tp/2014/05/06605689/13rRUNvPLaQ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545715", "title": "Layered 
Surface Detection for Virtual Unrolling", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545715/17D45XlyDv8", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2022/6803/0/680300a655", "title": "Extended State Observer Based Sliding Mode Control for a Class of Dynamic Systems with Input Constraint", "doi": null, "abstractUrl": "/proceedings-article/icceai/2022/680300a655/1FUUs2jzITC", "parentPublication": { "id": "proceedings/icceai/2022/6803/0", "title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2022/6803/0/680300a651", "title": "Extended State Observer Based Nonsingular Terminal Sliding Mode Control for a Class of Dynamic Systems", "doi": null, "abstractUrl": "/proceedings-article/icceai/2022/680300a651/1FUVHNQAl1K", "parentPublication": { "id": "proceedings/icceai/2022/6803/0", "title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H2petWxAqI", "title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)", "acronym": "cost", "groupId": "1847867", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H2psc5a9EI", "doi": "10.1109/CoST57098.2022.00038", "title": "Implementation of the Interaction Effect Among Virtual Large Curved Screens on Multiple Buildings Based on Mixed Reality", "normalizedTitle": "Implementation of the Interaction Effect Among Virtual Large Curved Screens on Multiple Buildings Based on Mixed Reality", "abstract": "The glass-free three dimensional (3D) large screen has provided an ultra-sensory immersive visual experience, and broken the aesthetic fatigue of people to print advertising. But it&#x2019;s also limited by the geographical location and high cost of installation, accordingly, the virtual large curved screens on multiple buildings with interaction effect based on mixed reality (MR) was designed and implemented in this paper. Firstly, the deep learning building detection and building surface recognition algorithm were adopted to recognize the building surfaces, and the recognized surfaces were further filtered and integrated, which then completed the positioning of multiple buildings virtual large screens based on the integrated multiple plane groups. Based on the simultaneous rendering of multiple virtual curved screens, several interaction modes were also designed between the large screen spaces, thus furtherly enhancing the user experience and application potential of the virtual curved screens. 
The overall program built modularly based on 3D engine achieves better recognition, rendering and interaction effects in mobile, which also provides new ideas for new forms of MR applications in urban environments.", "abstracts": [ { "abstractType": "Regular", "content": "The glass-free three dimensional (3D) large screen has provided an ultra-sensory immersive visual experience, and broken the aesthetic fatigue of people to print advertising. But it&#x2019;s also limited by the geographical location and high cost of installation, accordingly, the virtual large curved screens on multiple buildings with interaction effect based on mixed reality (MR) was designed and implemented in this paper. Firstly, the deep learning building detection and building surface recognition algorithm were adopted to recognize the building surfaces, and the recognized surfaces were further filtered and integrated, which then completed the positioning of multiple buildings virtual large screens based on the integrated multiple plane groups. Based on the simultaneous rendering of multiple virtual curved screens, several interaction modes were also designed between the large screen spaces, thus furtherly enhancing the user experience and application potential of the virtual curved screens. The overall program built modularly based on 3D engine achieves better recognition, rendering and interaction effects in mobile, which also provides new ideas for new forms of MR applications in urban environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The glass-free three dimensional (3D) large screen has provided an ultra-sensory immersive visual experience, and broken the aesthetic fatigue of people to print advertising. But it’s also limited by the geographical location and high cost of installation, accordingly, the virtual large curved screens on multiple buildings with interaction effect based on mixed reality (MR) was designed and implemented in this paper. 
Firstly, the deep learning building detection and building surface recognition algorithm were adopted to recognize the building surfaces, and the recognized surfaces were further filtered and integrated, which then completed the positioning of multiple buildings virtual large screens based on the integrated multiple plane groups. Based on the simultaneous rendering of multiple virtual curved screens, several interaction modes were also designed between the large screen spaces, thus furtherly enhancing the user experience and application potential of the virtual curved screens. The overall program built modularly based on 3D engine achieves better recognition, rendering and interaction effects in mobile, which also provides new ideas for new forms of MR applications in urban environments.", "fno": "624800a140", "keywords": [ "Data Visualisation", "Rendering Computer Graphics", "Virtual Reality", "Interaction Effects", "Interaction Effect", "Virtual Large Curved Screens", "Mixed Reality", "Glass Free", "Dimensional Large Screen", "Ultra Sensory Immersive Visual Experience", "Deep Learning Building Detection", "Building Surface Recognition Algorithm", "Building Surfaces", "Recognized Surfaces", "Multiple Buildings Virtual Large Screens", "Integrated Multiple Plane Groups", "Multiple Virtual Curved Screens", "Interaction Modes", "Screen Spaces", "Rendering", "Visualization", "Three Dimensional Displays", "Costs", "Buildings", "Urban Areas", "Mixed Reality", "Virtual Reality", "Large Curved Screen", "Mixed Reality", "Plane Detection" ], "authors": [ { "affiliation": "University of China,School of Information and Communication Engineering Communication,Beijing,China", "fullName": "Yilan Zhou", "givenName": "Yilan", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "University of China,School of Information and Communication Engineering Communication,Beijing,China", "fullName": "Yue Zhang", "givenName": "Yue", "surname": "Zhang", "__typename": 
"ArticleAuthorType" }, { "affiliation": "University of China,School of Information and Communication Engineering Communication,Beijing,China", "fullName": "Shanzhen Lan", "givenName": "Shanzhen", "surname": "Lan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of China,School of Information and Communication Engineering Communication,Beijing,China", "fullName": "Shaobin Li", "givenName": "Shaobin", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "cost", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "140-145", "year": "2022", "issn": null, "isbn": "978-1-6654-6248-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "624800a134", "articleId": "1H2pflxjZ3W", "__typename": "AdjacentArticleType" }, "next": { "fno": "624800a146", "articleId": "1H2plIhFWcE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vs-games/2016/2722/0/07590358", "title": "Procedural Modeling in Archaeology: Approximating Ionic Style Columns for Games", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2016/07590358/12OmNB0nWbl", "parentPublication": { "id": "proceedings/vs-games/2016/2722/0", "title": "2016 8th International Conference on Games and Virtual Worlds for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvvrhc/1998/8283/0/82830078", "title": "Vision and Graphics in Producing Mixed Reality Worlds", "doi": null, "abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1", "parentPublication": { "id": "proceedings/cvvrhc/1998/8283/0", "title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/isar/2001/1375/0/13750169", "title": "MR2 (MR Square): A Mixed-Reality Meeting Room", "doi": null, "abstractUrl": "/proceedings-article/isar/2001/13750169/12OmNzyYibC", "parentPublication": { "id": "proceedings/isar/2001/1375/0", "title": "Proceedings IEEE and ACM International Symposium on Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cste/2022/8188/0/818800a082", "title": "Integrating Inquiry-Based Pedagogy with Mixed Reality: Theories and Practices", "doi": null, "abstractUrl": "/proceedings-article/cste/2022/818800a082/1J7VZM9bxDi", "parentPublication": { "id": "proceedings/cste/2022/8188/0", "title": "2022 4th International Conference on Computer Science and Technologies in Education (CSTE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a566", "title": "A cup of coffee in Mixed Reality: analysis of movements&#x0027; smoothness from real to virtual", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a566/1J7Waw7xSy4", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a657", "title": "Mixed Reality for Engineering Design Review Using Finite Element Analysis", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a657/1J7WwCL6CCQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798101", "title": "Mixed Reality in Art 
Education", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798101/1cJ0RtUtRgk", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798180", "title": "Architectural Design in Virtual Reality and Mixed Reality Environments: A Comparative Analysis", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798180/1cJ1bDktgoU", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedme/2020/8145/0/09122173", "title": "Settlement Analysis and Calculation for Additionally Installed Elevators in Existing Residential Buildings in Shanghai", "doi": null, "abstractUrl": "/proceedings-article/icedme/2020/09122173/1kRSH1LHCpy", "parentPublication": { "id": "proceedings/icedme/2020/8145/0", "title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icid/2020/1481/0/440500a033", "title": "Application of BIM Technology in the Fire Protection System of Ancient Buildings in the Intelligent Era", "doi": null, "abstractUrl": "/proceedings-article/icid/2020/440500a033/1taFrszFtok", "parentPublication": { "id": "proceedings/icid/2020/1481/0", "title": "2020 International Conference on Intelligent Design (ICID)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tnWwqMuCzu", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tnY4fj5Ykg", "doi": "10.1109/VRW52623.2021.00149", "title": "Evaluation of Curved Raycasting-based Interactive Surfaces in Virtual Environments", "normalizedTitle": "Evaluation of Curved Raycasting-based Interactive Surfaces in Virtual Environments", "abstract": "As 3D user interfaces become more popular, quick and reliable aerial selection and manipulation are desired. We evaluated a virtual curved interactive surface with controllable curvature based on raycasting. The user operates the surface by pointing using a head-mounted display and grip-type controller. We investigated the operation speed and accuracy of curved interactive surfaces under various presentation conditions. To investigate the users' operation ability for different curved conditions, we experimented with multiple surface curvature radii, including completely flat conditions. The experimental results showed that varying the curvature of the display improved the pointing accuracy by 28% and the speed by 15% over the flat surface in the most effective cases. These findings can be applied to curved interactive surfaces with mid-air pointing for 2D-style applications such as GIS and photo browsers.", "abstracts": [ { "abstractType": "Regular", "content": "As 3D user interfaces become more popular, quick and reliable aerial selection and manipulation are desired. We evaluated a virtual curved interactive surface with controllable curvature based on raycasting. The user operates the surface by pointing using a head-mounted display and grip-type controller. We investigated the operation speed and accuracy of curved interactive surfaces under various presentation conditions. 
To investigate the users' operation ability for different curved conditions, we experimented with multiple surface curvature radii, including completely flat conditions. The experimental results showed that varying the curvature of the display improved the pointing accuracy by 28% and the speed by 15% over the flat surface in the most effective cases. These findings can be applied to curved interactive surfaces with mid-air pointing for 2D-style applications such as GIS and photo browsers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As 3D user interfaces become more popular, quick and reliable aerial selection and manipulation are desired. We evaluated a virtual curved interactive surface with controllable curvature based on raycasting. The user operates the surface by pointing using a head-mounted display and grip-type controller. We investigated the operation speed and accuracy of curved interactive surfaces under various presentation conditions. To investigate the users' operation ability for different curved conditions, we experimented with multiple surface curvature radii, including completely flat conditions. The experimental results showed that varying the curvature of the display improved the pointing accuracy by 28% and the speed by 15% over the flat surface in the most effective cases. 
These findings can be applied to curved interactive surfaces with mid-air pointing for 2D-style applications such as GIS and photo browsers.", "fno": "405700a534", "keywords": [ "Data Visualisation", "Helmet Mounted Displays", "Rendering Computer Graphics", "User Interfaces", "Virtual Reality", "Virtual Curved Interactive Surface", "Controllable Curvature", "Head Mounted Display", "Grip Type Controller", "Operation Speed", "Curved Interactive Surfaces", "Presentation Conditions", "Users", "Different Curved Conditions", "Multiple Surface Curvature Radii", "Completely Flat Conditions", "Pointing Accuracy", "Flat Surface", "Curved Raycasting Based Interactive Surfaces", "Virtual Environments", "Three Dimensional Displays", "Head Mounted Displays", "Conferences", "Virtual Environments", "User Interfaces", "Time Measurement", "Browsers", "Human Centered Computing", "Virtual Reality", "Displays And Imagers" ], "authors": [ { "affiliation": "Nikon Corporation", "fullName": "Tomomi Takashina", "givenName": "Tomomi", "surname": "Takashina", "__typename": "ArticleAuthorType" }, { "affiliation": "Nikon Corporation", "fullName": "Mitsuru Ito", "givenName": "Mitsuru", "surname": "Ito", "__typename": "ArticleAuthorType" }, { "affiliation": "Nikon Systems Inc", "fullName": "Hitoshi Nagaura", "givenName": "Hitoshi", "surname": "Nagaura", "__typename": "ArticleAuthorType" }, { "affiliation": "Nikon Systems Inc", "fullName": "Eisuke Wakabayashi", "givenName": "Eisuke", "surname": "Wakabayashi", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "534-535", "year": "2021", "issn": null, "isbn": "978-1-6654-4057-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tnY45mQOWc", "name": "pvrw202140570-09419109s1-mm_405700a534.zip", "size": "20.2 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419109s1-mm_405700a534.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "405700a532", "articleId": "1tnXy7NpnGg", "__typename": "AdjacentArticleType" }, "next": { "fno": "405700a536", "articleId": "1tnXBidgc48", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icoip/2010/4252/1/4252a623", "title": "Influence of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252a623/12OmNAoUTj0", "parentPublication": { "id": "proceedings/icoip/2010/4252/2", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139535", "title": "Determining back-facing curved model surfaces by analysis at the boundary", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139535/12OmNBcj5Cm", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139537", "title": "Representing surface curvature discontinuities on curved surfaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139537/12OmNvT2peK", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nems/2009/4629/0/05068596", "title": "Diffraction angle prediction of surface micromachined planar curved grating and crossed grating", "doi": null, "abstractUrl": "/proceedings-article/nems/2009/05068596/12OmNvoWUXu", "parentPublication": { "id": 
"proceedings/nems/2009/4629/0", "title": "International Conference on Nano/Micro Engineered and Molecular Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1995/7042/0/70420269", "title": "Motion from the frontier of curved surfaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420269/12OmNwtEEH5", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206624", "title": "Recovering specular surfaces using curved line images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206624/12OmNxG1yX3", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2012/4772/0/4772a218", "title": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a218/12OmNz2kqrR", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1971/06/01671906", "title": "Continuous Shading of Curved Surfaces", "doi": null, "abstractUrl": "/journal/tc/1971/06/01671906/13rRUwgQpBP", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1992/01/mcg1992010054", "title": "Locally Manipulating the Geometry of Curved Surfaces", "doi": null, "abstractUrl": "/magazine/cg/1992/01/mcg1992010054/13rRUygT7cy", "parentPublication": { 
"id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089461", "title": "Reading on 3D Surfaces in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089461/1jIxbelqcbC", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1x3kek8UTe0", "title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)", "acronym": "icisce", "groupId": "1807704", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1x3kCJdq8ZW", "doi": "10.1109/ICISCE50968.2020.00238", "title": "Research on Laser Measurement of 3D curved Glass Contour based on computer Vision", "normalizedTitle": "Research on Laser Measurement of 3D curved Glass Contour based on computer Vision", "abstract": "In the mobile phone glass industry, curved glass is widely used. Whether the curved glass is measured with high efficiency and precision and whether the curved edge profile error can be evaluated directly affects the quality and service cycle of glass products. In view of the difficulty of accurate measurement and evaluation of 3D mobile phones curved glass profile, a visual measurement method of 3D mobile phones curved glass profile error based on laser scanning and shape template matching is proposed in this paper. Accurate measurement of mobile phone curved glass greatly improves the fit between curved glass and mobile phone and the efficiency of production. Therefore, this paper proposes a method for evaluating the contour degree of curved glass on mobile phones based on Open Cascade and OpenCV. This method uses the lasers and camera equipment to extract the profile outline of the curved glass, then extract the profile outline of the mobile phone curved glass model that has been drawn in 3D software. The extracted contour lines are matched by methods such as translation and rotation, and finally the distance between the corresponding points is calculated as the contour error. 
Experimental results show that the method is able to accurately measure the profile error and greatly reduce the measurement instability and have good robustness.", "abstracts": [ { "abstractType": "Regular", "content": "In the mobile phone glass industry, curved glass is widely used. Whether the curved glass is measured with high efficiency and precision and whether the curved edge profile error can be evaluated directly affects the quality and service cycle of glass products. In view of the difficulty of accurate measurement and evaluation of 3D mobile phones curved glass profile, a visual measurement method of 3D mobile phones curved glass profile error based on laser scanning and shape template matching is proposed in this paper. Accurate measurement of mobile phone curved glass greatly improves the fit between curved glass and mobile phone and the efficiency of production. Therefore, this paper proposes a method for evaluating the contour degree of curved glass on mobile phones based on Open Cascade and OpenCV. This method uses the lasers and camera equipment to extract the profile outline of the curved glass, then extract the profile outline of the mobile phone curved glass model that has been drawn in 3D software. The extracted contour lines are matched by methods such as translation and rotation, and finally the distance between the corresponding points is calculated as the contour error. Experimental results show that the method is able to accurately measure the profile error and greatly reduce the measurement instability and have good robustness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the mobile phone glass industry, curved glass is widely used. Whether the curved glass is measured with high efficiency and precision and whether the curved edge profile error can be evaluated directly affects the quality and service cycle of glass products. 
In view of the difficulty of accurate measurement and evaluation of 3D mobile phones curved glass profile, a visual measurement method of 3D mobile phones curved glass profile error based on laser scanning and shape template matching is proposed in this paper. Accurate measurement of mobile phone curved glass greatly improves the fit between curved glass and mobile phone and the efficiency of production. Therefore, this paper proposes a method for evaluating the contour degree of curved glass on mobile phones based on Open Cascade and OpenCV. This method uses the lasers and camera equipment to extract the profile outline of the curved glass, then extract the profile outline of the mobile phone curved glass model that has been drawn in 3D software. The extracted contour lines are matched by methods such as translation and rotation, and finally the distance between the corresponding points is calculated as the contour error. Experimental results show that the method is able to accurately measure the profile error and greatly reduce the measurement instability and have good robustness.", "fno": "640600b173", "keywords": [ "Cameras", "CCD Image Sensors", "Computer Vision", "Displacement Measurement", "Edge Detection", "Feature Extraction", "Image Matching", "Machine Tools", "Mobile Handsets", "Mobile Phone Glass Industry", "Curved Edge Profile Error", "Glass Products", "Mobile Phone Curved Glass Model", "Computer Vision", "Three Dimensional Displays", "Shape", "Shape Measurement", "Measurement Uncertainty", "Measurement By Laser Beam", "Glass", "Curved Glass", "Contour Measurement", "Profile Error", "Matching", "Laser Scanning" ], "authors": [ { "affiliation": "Wuhan Institute of Technology,Hubei Key Laboratory of Optical Information and Pattern Recognition, Hubei Research Centre of Video Image and High Definition Projection,Wuhan,China", "fullName": "Peng Wang", "givenName": "Peng", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Wuhan 
Institute of Technology,Hubei Key Laboratory of Optical Information and Pattern Recognition, Hubei Research Centre of Video Image and High Definition Projection,Wuhan,China", "fullName": "Jinmeng Wu", "givenName": "Jinmeng", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Wuhan Institute of Technology,Hubei Key Laboratory of Optical Information and Pattern Recognition, Hubei Research Centre of Video Image and High Definition Projection,Wuhan,China", "fullName": "Guilin Yan", "givenName": "Guilin", "surname": "Yan", "__typename": "ArticleAuthorType" }, { "affiliation": "Wuhan Institute of Technology,Hubei Key Laboratory of Optical Information and Pattern Recognition, Hubei Research Centre of Video Image and High Definition Projection,Wuhan,China", "fullName": "Shuo Bian", "givenName": "Shuo", "surname": "Bian", "__typename": "ArticleAuthorType" } ], "idPrefix": "icisce", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "1173-1177", "year": "2020", "issn": null, "isbn": "978-1-7281-6406-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "640600b168", "articleId": "1x3kobIP1Kg", "__typename": "AdjacentArticleType" }, "next": { "fno": "640600b178", "articleId": "1x3l9ZF7EZy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2004/8603/3/01394670", "title": "Stained-glass visualization for highly condensed video summaries", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394670/12OmNBSSV87", "parentPublication": { "id": "proceedings/icme/2004/8603/3", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2010/4286/1/4286a156", "title": 
"Adaptive Depth Measurement System of Bi-component Insulating Glass Sealing Robot", "doi": null, "abstractUrl": "/proceedings-article/icdma/2010/4286a156/12OmNzJbR2s", "parentPublication": { "id": "proceedings/icdma/2010/4286/1", "title": "2010 International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/06/v1547", "title": "Image-Based Stained Glass", "doi": null, "abstractUrl": "/journal/tg/2006/06/v1547/13rRUx0Pqpr", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2019/9245/0/924500a246", "title": "Particle Filter Localization Using Visual Markers Based Omnidirectional Vision and a Laser Sensor", "doi": null, "abstractUrl": "/proceedings-article/irc/2019/924500a246/18M7f37ayLm", "parentPublication": { "id": "proceedings/irc/2019/9245/0", "title": "2019 Third IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2612", "title": "Glass Segmentation using Intensity and Spectral Polarization Cues", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2612/1H0Nbg27Op2", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/igsc/2019/5416/0/08957173", "title": "Energy-Oriented Designs of an Augmented-Reality Application on a VUZIX Blade Smart Glass", "doi": null, "abstractUrl": "/proceedings-article/igsc/2019/08957173/1gAueBLhzBS", "parentPublication": { "id": "proceedings/igsc/2019/5416/0", "title": "2019 Tenth International Green and Sustainable Computing Conference 
(IGSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvidl/2020/9481/0/948100a090", "title": "Adaptive Visual Inspection Method for Transparent Label Defect Detection of Curved Glass Bottle", "doi": null, "abstractUrl": "/proceedings-article/cvidl/2020/948100a090/1pbe8FvD0L6", "parentPublication": { "id": "proceedings/cvidl/2020/9481/0", "title": "2020 International Conference on Computer Vision, Image and Deep Learning (CVIDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsmt/2020/8668/0/866800a123", "title": "Research on Motion Algorithm of glass cleaning robot on regular shaped glass", "doi": null, "abstractUrl": "/proceedings-article/iccsmt/2020/866800a123/1u8pBRyIx6E", "parentPublication": { "id": "proceedings/iccsmt/2020/8668/0", "title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsmt/2020/8668/0/866800a009", "title": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion", "doi": null, "abstractUrl": "/proceedings-article/iccsmt/2020/866800a009/1u8pDO7YPzG", "parentPublication": { "id": "proceedings/iccsmt/2020/8668/0", "title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900n3410", "title": "Rich Context Aggregation with Reflection Prior for Glass Surface Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900n3410/1yeM48LNgT6", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCmpcNk", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNAq3hRK", "doi": "10.1109/VISUAL.2005.1532782", "title": "On the optimization of visualizations of complex phenomena", "normalizedTitle": "On the optimization of visualizations of complex phenomena", "abstract": "The problem of perceptually optimizing complex visualizations is a difficult one, involving perceptual as well as aesthetic issues. In our experience, controlled experiments are quite limited in their ability to uncover interrelationships among visualization parameters, and thus may not be the most useful way to develop rules-of-thumb or theory to guide the production of high-quality visualizations. In this paper, we propose a new experimental approach to optimizing visualization quality that integrates some of the strong points of controlled experiments with methods more suited to investigating complex highly-coupled phenomena. We use human-in-the-loop experiments to search through visualization parameter space, generating large databases of rated visualization solutions. This is followed by data mining to extract results such as exemplar visualizations, guidelines for producing visualizations, and hypotheses about strategies leading to strong visualizations. The approach can easily address both perceptual and aesthetic concerns, and can handle complex parameter interactions. We suggest a genetic algorithm as a valuable way of guiding the human-in-the-loop search through visualization parameter space. We describe our methods for using clustering, histogramming, principal component analysis, and neural networks for data mining. 
The experimental approach is illustrated with a study of the problem of optimal texturing for viewing layered surfaces so that both surfaces are maximally observable.", "abstracts": [ { "abstractType": "Regular", "content": "The problem of perceptually optimizing complex visualizations is a difficult one, involving perceptual as well as aesthetic issues. In our experience, controlled experiments are quite limited in their ability to uncover interrelationships among visualization parameters, and thus may not be the most useful way to develop rules-of-thumb or theory to guide the production of high-quality visualizations. In this paper, we propose a new experimental approach to optimizing visualization quality that integrates some of the strong points of controlled experiments with methods more suited to investigating complex highly-coupled phenomena. We use human-in-the-loop experiments to search through visualization parameter space, generating large databases of rated visualization solutions. This is followed by data mining to extract results such as exemplar visualizations, guidelines for producing visualizations, and hypotheses about strategies leading to strong visualizations. The approach can easily address both perceptual and aesthetic concerns, and can handle complex parameter interactions. We suggest a genetic algorithm as a valuable way of guiding the human-in-the-loop search through visualization parameter space. We describe our methods for using clustering, histogramming, principal component analysis, and neural networks for data mining. The experimental approach is illustrated with a study of the problem of optimal texturing for viewing layered surfaces so that both surfaces are maximally observable.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The problem of perceptually optimizing complex visualizations is a difficult one, involving perceptual as well as aesthetic issues. 
In our experience, controlled experiments are quite limited in their ability to uncover interrelationships among visualization parameters, and thus may not be the most useful way to develop rules-of-thumb or theory to guide the production of high-quality visualizations. In this paper, we propose a new experimental approach to optimizing visualization quality that integrates some of the strong points of controlled experiments with methods more suited to investigating complex highly-coupled phenomena. We use human-in-the-loop experiments to search through visualization parameter space, generating large databases of rated visualization solutions. This is followed by data mining to extract results such as exemplar visualizations, guidelines for producing visualizations, and hypotheses about strategies leading to strong visualizations. The approach can easily address both perceptual and aesthetic concerns, and can handle complex parameter interactions. We suggest a genetic algorithm as a valuable way of guiding the human-in-the-loop search through visualization parameter space. We describe our methods for using clustering, histogramming, principal component analysis, and neural networks for data mining. 
The experimental approach is illustrated with a study of the problem of optimal texturing for viewing layered surfaces so that both surfaces are maximally observable.", "fno": "01532782", "keywords": [ "Data Visualisation", "Data Mining", "Very Large Databases", "Genetic Algorithms", "Pattern Clustering", "Principal Component Analysis", "Neural Nets", "Search Problems", "Optimization", "Complex Phenomena Visualizations", "Human In The Loop Experiments", "Visualization Parameter Space", "Large Databases", "Data Mining", "Genetic Algorithm", "Histogramming", "Principal Component Analysis", "Neural Networks", "Data Visualization", "Data Mining", "Surface Texture", "Production", "Optimization Methods", "Visual Databases", "Guidelines", "Genetic Algorithms", "Principal Component Analysis", "Neural Networks" ], "authors": [ { "affiliation": "Texas A&M Univ., College Station, TX, USA", "fullName": "D. House", "givenName": "D.", "surname": "House", "__typename": "ArticleAuthorType" }, { "affiliation": "Texas A&M Univ., College Station, TX, USA", "fullName": "A. Bair", "givenName": "A.", "surname": "Bair", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "C. 
Ware", "givenName": "C.", "surname": "Ware", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-01-01T00:00:00", "pubType": "proceedings", "pages": "87,88,89,90,91,92,93,94", "year": "2005", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01532781", "articleId": "12OmNyRPgMl", "__typename": "AdjacentArticleType" }, "next": { "fno": "01532783", "articleId": "12OmNAle6mg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/es/2016/3790/0/07880487", "title": "Recommendations for Data Visualizations Based on Gestalt Patterns", "doi": null, "abstractUrl": "/proceedings-article/es/2016/07880487/12OmNCb3ftz", "parentPublication": { "id": "proceedings/es/2016/3790/0", "title": "2016 4th International Conference on Enterprise Systems (ES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660012", "title": "On the Optimization of Visualizations of Complex Phenomena", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660012/12OmNCfSqN3", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edoc/2015/9203/0/9203a068", "title": "A Description Framework for Data Visualizations in Enterprise Information Systems", "doi": null, "abstractUrl": "/proceedings-article/edoc/2015/9203a068/12OmNxd4tAO", "parentPublication": { "id": "proceedings/edoc/2015/9203/0", "title": "2015 IEEE 19th International Enterprise Distributed Object Computing Conference (EDOC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2010/06/ttg2010060943", "title": "How Information Visualization Novices Construct Visualizations", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010060943/13rRUwInvAZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/04/v0509", "title": "An Approach to the Perceptual Optimization of Complex Visualizations", "doi": null, "abstractUrl": "/journal/tg/2006/04/v0509/13rRUx0gepU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017646", "title": "Extracting and Retargeting Color Mappings from Bitmap Images of Visualizations", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017646/13rRUxYIN4e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875978", "title": "Ranking Visualizations of Correlation Using Weber's Law", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875978/13rRUyeCkai", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08454489", "title": "Patterns and Pace: Quantifying Diverse Exploration Behavior with Visualizations on the Web", "doi": null, "abstractUrl": "/journal/tg/2019/01/08454489/17D45W1Oa3s", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08454343", "title": "A Heuristic Approach to Value-Driven Evaluation of 
Visualizations", "doi": null, "abstractUrl": "/journal/tg/2019/01/08454343/17D45XDIXSW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09916137", "title": "Revisiting the Design Patterns of Composite Visualizations", "doi": null, "abstractUrl": "/journal/tg/5555/01/09916137/1HojAjSAGNq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeQJrGq6WI", "doi": "10.1109/ISMAR-Adjunct54149.2021.00073", "title": "Towards In-situ Authoring of AR Visualizations with Mobile Devices", "normalizedTitle": "Towards In-situ Authoring of AR Visualizations with Mobile Devices", "abstract": "Augmented Reality (AR) has been shown to enhance the data visualization and analysis process by supporting users in their immersive exploration of data in a real-world context. However, authoring such visualizations still heavily relies on traditional, stationary desktop setups, which inevitably separates users from the actual working space. To better support the authoring process in immersive environments, we propose the integration of spatially-aware mobile devices. Such devices also enable precise touch interaction for data configuration while lowering the entry barriers of novel immersive technologies. We therefore contribute an initial set of concepts within a scenario for authoring AR visualizations. We implemented an early prototype for configuring visualizations in-situ on the mobile device without programming and report our first impressions.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented Reality (AR) has been shown to enhance the data visualization and analysis process by supporting users in their immersive exploration of data in a real-world context. However, authoring such visualizations still heavily relies on traditional, stationary desktop setups, which inevitably separates users from the actual working space. To better support the authoring process in immersive environments, we propose the integration of spatially-aware mobile devices. 
Such devices also enable precise touch interaction for data configuration while lowering the entry barriers of novel immersive technologies. We therefore contribute an initial set of concepts within a scenario for authoring AR visualizations. We implemented an early prototype for configuring visualizations in-situ on the mobile device without programming and report our first impressions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented Reality (AR) has been shown to enhance the data visualization and analysis process by supporting users in their immersive exploration of data in a real-world context. However, authoring such visualizations still heavily relies on traditional, stationary desktop setups, which inevitably separates users from the actual working space. To better support the authoring process in immersive environments, we propose the integration of spatially-aware mobile devices. Such devices also enable precise touch interaction for data configuration while lowering the entry barriers of novel immersive technologies. We therefore contribute an initial set of concepts within a scenario for authoring AR visualizations. 
We implemented an early prototype for configuring visualizations in-situ on the mobile device without programming and report our first impressions.", "fno": "129800a324", "keywords": [ "Augmented Reality", "Authoring Systems", "Data Analysis", "Data Visualisation", "Human Computer Interaction", "Mobile Computing", "AR Visualizations", "Augmented Reality", "Data Visualization", "Immersive Environments", "Spatially Aware Mobile Devices", "Touch Interaction", "Data Configuration", "Immersive Technologies", "In Situ Authoring", "Data Analysis", "Data Visualization", "Prototypes", "Mobile Handsets", "Augmented Reality", "Space Stations", "Human Centered Computing", "Mixed Augmented Reality", "Visualization" ], "authors": [ { "affiliation": "Interactive Media Lab Technische Universität Dresden", "fullName": "Marc Satkowski", "givenName": "Marc", "surname": "Satkowski", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Media Lab Technische Universität Dresden", "fullName": "Weizhou Luo", "givenName": "Weizhou", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Media Lab Technische Universität Dresden", "fullName": "Raimund Dachselt", "givenName": "Raimund", "surname": "Dachselt", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "324-325", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800a321", "articleId": "1yeQEyk3fbO", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800a326", "articleId": "1yeQN0kq9G0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icsc/2016/0662/0/0662a358", "title": "Mobile Augmented Reality 
Authoring Tool", "doi": null, "abstractUrl": "/proceedings-article/icsc/2016/0662a358/12OmNAXglVC", "parentPublication": { "id": "proceedings/icsc/2016/0662/0", "title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2010/4124/0/4124a040", "title": "ARtalet: Tangible User Interface Based Immersive Augmented Reality Authoring Tool for Digilog Book", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2010/4124a040/12OmNrEL2B8", "parentPublication": { "id": "proceedings/isuvr/2010/4124/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2011/4511/0/4511a063", "title": "AR-Based Positioning for Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/icppw/2011/4511a063/12OmNwwuE0H", "parentPublication": { "id": "proceedings/icppw/2011/4511/0", "title": "2011 40th International Conference on Parallel Processing Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2002/1781/0/17810237", "title": "A Pragmatic Approach to Augmented Reality Authoring", "doi": null, "abstractUrl": "/proceedings-article/ismar/2002/17810237/12OmNxV4iuj", "parentPublication": { "id": "proceedings/ismar/2002/1781/0", "title": "Proceedings. 
International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444806", "title": "In-Place Sketching for content authoring in Augmented Reality games", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444806/12OmNxveNJV", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/08/08611113", "title": "MARVisT: Authoring Glyph-Based Visualization in Mobile Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2020/08/08611113/17D45Wuc367", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a654", "title": "Bring Store in My Room: AR Store Authoring System for Spatial Experience in Mobile Shopping", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a654/1J7WqitKsAU", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a558", "title": "ATOFIS, an AR Training System for Manual Assembly: A Full Comparative Evaluation against Guides", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a558/1JrRgTi23y8", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798148", "title": "CAVE-AR: A VR Authoring System to 
Interactively Design, Simulate, and Debug Multi-user AR Experiences", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798148/1cJ0FRS6rjG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797973", "title": "A Comparison of Desktop and Augmented Reality Scenario Based Training Authoring Tools", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797973/1cJ0S2MS49O", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAlvHKL", "title": "2011 Tenth International Symposium on Autonomous Decentralized Systems", "acronym": "isads", "groupId": "1000067", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNBU1jQp", "doi": "10.1109/ISADS.2011.33", "title": "Low-Power Off-Chip Memory Design for Video Decoder Using Embedded Bus-Invert Coding", "normalizedTitle": "Low-Power Off-Chip Memory Design for Video Decoder Using Embedded Bus-Invert Coding", "abstract": "In this paper, a simple, efficient, low power off-chip memory design is proposed, which fully exploits the features of DRAM memory and video application, as well as overcomes the drawbacks of algorithm complexity and system modification of embedded compression, which is a popular way to decrease power consumption of the off-chip memory. The integration of the scheme into video decoder will not involve any extra video decoding complexity. It adopts the simple bus-invert encoding scheme. Based on the fact that the power consumption of logic `0' bit is less than that of logic `1', bus-invert encoding scheme is applied to the transferring data between video decoder and off-chip memory. Meanwhile, the features of fault tolerance of human eyes and lossy processing of video decoding application are exploited to solve the extra flag-bit of encoder scheme in off-chip SDARM memory, which has the fixed bit width and is less flexible than on-chip SRAM. This scheme is integrated into MPEG-2 decoder system. 
The experiment results show that this scheme can archive 20%-35% reduction in power consumption of logic `1' bit, and the objective quality of image has about 1.5db PSNR improvement on average.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, a simple, efficient, low power off-chip memory design is proposed, which fully exploits the features of DRAM memory and video application, as well as overcomes the drawbacks of algorithm complexity and system modification of embedded compression, which is a popular way to decrease power consumption of the off-chip memory. The integration of the scheme into video decoder will not involve any extra video decoding complexity. It adopts the simple bus-invert encoding scheme. Based on the fact that the power consumption of logic `0' bit is less than that of logic `1', bus-invert encoding scheme is applied to the transferring data between video decoder and off-chip memory. Meanwhile, the features of fault tolerance of human eyes and lossy processing of video decoding application are exploited to solve the extra flag-bit of encoder scheme in off-chip SDARM memory, which has the fixed bit width and is less flexible than on-chip SRAM. This scheme is integrated into MPEG-2 decoder system. The experiment results show that this scheme can archive 20%-35% reduction in power consumption of logic `1' bit, and the objective quality of image has about 1.5db PSNR improvement on average.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, a simple, efficient, low power off-chip memory design is proposed, which fully exploits the features of DRAM memory and video application, as well as overcomes the drawbacks of algorithm complexity and system modification of embedded compression, which is a popular way to decrease power consumption of the off-chip memory. The integration of the scheme into video decoder will not involve any extra video decoding complexity. 
It adopts the simple bus-invert encoding scheme. Based on the fact that the power consumption of logic `0' bit is less than that of logic `1', bus-invert encoding scheme is applied to the transferring data between video decoder and off-chip memory. Meanwhile, the features of fault tolerance of human eyes and lossy processing of video decoding application are exploited to solve the extra flag-bit of encoder scheme in off-chip SDARM memory, which has the fixed bit width and is less flexible than on-chip SRAM. This scheme is integrated into MPEG-2 decoder system. The experiment results show that this scheme can archive 20%-35% reduction in power consumption of logic `1' bit, and the objective quality of image has about 1.5db PSNR improvement on average.", "fno": "05741318", "keywords": [ "Data Compression", "DRAM Chips", "Video Coding", "Low Power Off Chip Memory Design", "Video Decoder", "Embedded Compression", "Power Consumption", "Bus Invert Encoding Scheme", "Data Transferring", "Off Chip SDARM Memory", "MPEG 2 Decoder System", "Image Quality", "PSNR", "Word Length 0 Bit", "Word Length 1 Bit", "Decoding", "SDRAM", "Encoding", "Power Demand", "Memory Management", "Image Coding", "Low Power", "Bus Invert Encoding Scheme", "Off Chip SDRAM Memory", "MPEG 2 Decoder", "Fault Tolerance", "Lossy Processing" ], "authors": [ { "affiliation": null, "fullName": "Ni Zhou", "givenName": "Ni", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Fei Qiao", "givenName": "Fei", "surname": "Qiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Huazhong Yang", "givenName": "Huazhong", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hui Wang", "givenName": "Hui", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "isads", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": 
"proceedings", "pages": "251-255", "year": "2011", "issn": "1541-0056", "isbn": "978-1-61284-213-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05741317", "articleId": "12OmNCdBDUn", "__typename": "AdjacentArticleType" }, "next": { "fno": "05741319", "articleId": "12OmNqFrGFi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2008/2570/0/04607556", "title": "Flexible distribution of computational complexity between the encoder and the decoder in distributed video coding", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607556/12OmNANkobN", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccd/1992/3110/0/00276242", "title": "MARVLE: a VLSI chip for variable length encoding and decoding", "doi": null, "abstractUrl": "/proceedings-article/iccd/1992/00276242/12OmNAoDhVU", "parentPublication": { "id": "proceedings/iccd/1992/3110/0", "title": "Proceedings 1992 IEEE International Conference on Computer Design: VLSI in Computers & Processors", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2008/2570/0/04607503", "title": "Joint spatial and temporal correlation exploitation for Wyner-Ziv frames coding in DVC", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607503/12OmNBO3JUx", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011968", "title": "Decoder driven multi resolution side information refinements and mode decisions for improved 
rate-distortion performance in distributed video coding", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011968/12OmNBpEeQP", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2008/2570/0/04607507", "title": "Single-loop decoding for multiview video coding", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607507/12OmNrHjqO6", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890699", "title": "On a 10-bit coding profile for AVS2 standard", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890699/12OmNy4r3MU", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2014/04/mmu2014040084", "title": "Context-Adaptive Modeling for Wavelet-Domain Distributed Video Coding", "doi": null, "abstractUrl": "/magazine/mu/2014/04/mmu2014040084/13rRUB7a18k", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2015/12/07050349", "title": "A Frame-Parallel 2 Gpixel/s Video Decoder Chip for UHDTV and 3-DTV/FTV Applications", "doi": null, "abstractUrl": "/journal/si/2015/12/07050349/13rRUytF478", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b777", "title": 
"Super-Resolution based Video Coding Scheme", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b777/1G578ylCePe", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102799", "title": "Decoder-Side Intra Mode Derivation For Next Generation Video Coding", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102799/1kwrkC8YnSM", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyxFKau", "title": "2016 International Conference on Network and Information Systems for Computers (ICNISC)", "acronym": "icnisc", "groupId": "1807445", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNBV9Iig", "doi": "10.1109/ICNISC.2016.049", "title": "Research and Implementation of Video Codec Based on FFmpeg", "normalizedTitle": "Research and Implementation of Video Codec Based on FFmpeg", "abstract": "In this information age, a efficient video encoding/decoding technology will help reducing information redundancy, delaying processing, enhancing the robustness, improving visual perception of quality, increasing the compression ratio, etc. At first, this article describes the history of video encoding/decoding standards development. Research on the key technologies of H.264 coding standard currently which is most widely used. And this thesis will further analysis on the technical improvement of the new generation of High Efficiency Video Coding (HEVC) standard. At the same time, according to the international standard and theory, using ffmpeg and sdl2.0 function library, in win32 environment, and through MFC package the code, developed a software. Through the software debug, realized the encoding/decoding and playing H264 and HEVC format video in this thesis, meanwhile, it also realized the mutual format conversion between H264 and HEVC format.", "abstracts": [ { "abstractType": "Regular", "content": "In this information age, a efficient video encoding/decoding technology will help reducing information redundancy, delaying processing, enhancing the robustness, improving visual perception of quality, increasing the compression ratio, etc. At first, this article describes the history of video encoding/decoding standards development. Research on the key technologies of H.264 coding standard currently which is most widely used. 
And this thesis will further analysis on the technical improvement of the new generation of High Efficiency Video Coding (HEVC) standard. At the same time, according to the international standard and theory, using ffmpeg and sdl2.0 function library, in win32 environment, and through MFC package the code, developed a software. Through the software debug, realized the encoding/decoding and playing H264 and HEVC format video in this thesis, meanwhile, it also realized the mutual format conversion between H264 and HEVC format.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this information age, a efficient video encoding/decoding technology will help reducing information redundancy, delaying processing, enhancing the robustness, improving visual perception of quality, increasing the compression ratio, etc. At first, this article describes the history of video encoding/decoding standards development. Research on the key technologies of H.264 coding standard currently which is most widely used. And this thesis will further analysis on the technical improvement of the new generation of High Efficiency Video Coding (HEVC) standard. At the same time, according to the international standard and theory, using ffmpeg and sdl2.0 function library, in win32 environment, and through MFC package the code, developed a software. 
Through the software debug, realized the encoding/decoding and playing H264 and HEVC format video in this thesis, meanwhile, it also realized the mutual format conversion between H264 and HEVC format.", "fno": "07945976", "keywords": [ "Program Debugging", "Video Codecs", "Video Coding", "Video Codec", "F Fmpeg", "Video Encoding Decoding Technology", "Information Redundancy", "Delaying Processing", "Visual Perception", "Compression Ratio", "H 264 Coding Standard", "High Efficiency Video Coding Standard", "HEVC Standard", "Software Debug", "Format Video", "Mutual Format Conversion", "Standards", "Encoding", "Decoding", "Image Coding", "Transform Coding", "Video Coding", "Transforms", "HEVC", "H 264", "Ffmpeg", "CABAC", "CAVLC" ], "authors": [ { "affiliation": null, "fullName": "Hao Zeng", "givenName": "Hao", "surname": "Zeng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhiyong Zhang", "givenName": "Zhiyong", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Lulin Shi", "givenName": "Lulin", "surname": "Shi", "__typename": "ArticleAuthorType" } ], "idPrefix": "icnisc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-04-01T00:00:00", "pubType": "proceedings", "pages": "184-188", "year": "2016", "issn": null, "isbn": "978-1-4673-8838-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07945975", "articleId": "12OmNy50gfL", "__typename": "AdjacentArticleType" }, "next": { "fno": "07945977", "articleId": "12OmNweTvQe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icassp/2004/8484/3/01326533", "title": "A rate-distortion optimal hybrid scalable/multiple-description video codec", "doi": null, "abstractUrl": "/proceedings-article/icassp/2004/01326533/12OmNBDQbgU", "parentPublication": { 
"id": "proceedings/icassp/2004/8484/3", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2016/1853/0/07786173", "title": "High Dynamic Range Video Coding with Backward Compatibility", "doi": null, "abstractUrl": "/proceedings-article/dcc/2016/07786173/12OmNxcMSkC", "parentPublication": { "id": "proceedings/dcc/2016/1853/0", "title": "2016 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2016/1853/0/07786192", "title": "The Thor Video Codec", "doi": null, "abstractUrl": "/proceedings-article/dcc/2016/07786192/12OmNxdm4uA", "parentPublication": { "id": "proceedings/dcc/2016/1853/0", "title": "2016 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2015/6959/0/07069380", "title": "Real-time CPU based H.265/HEVC encoding solution with x86 platform technology", "doi": null, "abstractUrl": "/proceedings-article/icnc/2015/07069380/12OmNyKJihB", "parentPublication": { "id": "proceedings/icnc/2015/6959/0", "title": "2015 International Conference on Computing, Networking and Communications (ICNC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2017/6721/0/07923716", "title": "Adaptive Transforms for Inter-Predicted Residuals in Post-HEVC Video Coding", "doi": null, "abstractUrl": "/proceedings-article/dcc/2017/07923716/12OmNyQpgZh", "parentPublication": { "id": "proceedings/dcc/2017/6721/0", "title": "2017 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitcs/2014/6541/0/07021717", "title": "A Study of High Efficiency Video Coding File Format", "doi": null, "abstractUrl": 
"/proceedings-article/icitcs/2014/07021717/12OmNz2TCvf", "parentPublication": { "id": "proceedings/icitcs/2014/6541/0", "title": "2014 International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2016/7258/0/07552864", "title": "Subjective-quality-optimized complexity control for HEVC decoding", "doi": null, "abstractUrl": "/proceedings-article/icme/2016/07552864/12OmNzBOhCf", "parentPublication": { "id": "proceedings/icme/2016/7258/0", "title": "2016 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dese/2016/5487/0/07930644", "title": "HEVC Based Multi-view Video Codec Using Frame Interleaving Technique", "doi": null, "abstractUrl": "/proceedings-article/dese/2016/07930644/12OmNzBOhvc", "parentPublication": { "id": "proceedings/dese/2016/5487/0", "title": "2016 9th International Conference on Developments in eSystems Engineering (DeSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/2/01394501", "title": "An efficient JPEG2000-based human image storage system", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394501/12OmNzC5Tr4", "parentPublication": { "id": "proceedings/icme/2004/8603/2", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2022/5351/0/535100a019", "title": "Design of Video Codec Platform Based on PowerPC+WW602 Architecture", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2022/535100a019/1KYtg0tVEIM", "parentPublication": { "id": "proceedings/icnisc/2022/5351/0", "title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAkEU4f", "title": "2011 IEEE International Conference on Multimedia and Expo", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNBpEeQP", "doi": "10.1109/ICME.2011.6011968", "title": "Decoder driven multi resolution side information refinements and mode decisions for improved rate-distortion performance in distributed video coding", "normalizedTitle": "Decoder driven multi resolution side information refinements and mode decisions for improved rate-distortion performance in distributed video coding", "abstract": "Distributed video coding (DVC) suits the light weight encoder and complex decoders, shifting the motion estimation to the decoder, in contrast to the conventional encoding scheme. Mode decision is one of the factor affecting the Rate Distortion (RD) performance due to the varying statistics of the sequence in the DVC. In this paper, we propose to introduce coding modes and the Side Information (SI) refinement modes at the decoder in the pixel domain multi resolution refinement(MRR) Wyner-Ziv scheme. During the decoding process, the mode decision of the current decoding layer of the block is based on the information from the previous decoded layers of the block and the updated SI. We further propose the adaptive weighted reconstruction scheme using the neighbor pixels. RD performance of the proposed scheme is tested with several sequences. The results shows significant performance improvement.", "abstracts": [ { "abstractType": "Regular", "content": "Distributed video coding (DVC) suits the light weight encoder and complex decoders, shifting the motion estimation to the decoder, in contrast to the conventional encoding scheme. Mode decision is one of the factor affecting the Rate Distortion (RD) performance due to the varying statistics of the sequence in the DVC. 
In this paper, we propose to introduce coding modes and the Side Information (SI) refinement modes at the decoder in the pixel domain multi resolution refinement(MRR) Wyner-Ziv scheme. During the decoding process, the mode decision of the current decoding layer of the block is based on the information from the previous decoded layers of the block and the updated SI. We further propose the adaptive weighted reconstruction scheme using the neighbor pixels. RD performance of the proposed scheme is tested with several sequences. The results shows significant performance improvement.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Distributed video coding (DVC) suits the light weight encoder and complex decoders, shifting the motion estimation to the decoder, in contrast to the conventional encoding scheme. Mode decision is one of the factor affecting the Rate Distortion (RD) performance due to the varying statistics of the sequence in the DVC. In this paper, we propose to introduce coding modes and the Side Information (SI) refinement modes at the decoder in the pixel domain multi resolution refinement(MRR) Wyner-Ziv scheme. During the decoding process, the mode decision of the current decoding layer of the block is based on the information from the previous decoded layers of the block and the updated SI. We further propose the adaptive weighted reconstruction scheme using the neighbor pixels. RD performance of the proposed scheme is tested with several sequences. 
The results shows significant performance improvement.", "fno": "06011968", "keywords": [ "Silicon", "Decoding", "Encoding", "Image Reconstruction", "Strontium", "Correlation", "Video Coding", "Distributed Video Coding", "Successive Refinement", "Pixel Domain WZ Video Coding" ], "authors": [ { "affiliation": "Electronics & Electrical Communication Engineering, Indian Institute of Technology Kharagpur, India-721302", "fullName": "Vijay Kumar", "givenName": "Vijay", "surname": "Kumar", "__typename": "ArticleAuthorType" }, { "affiliation": "Electronics & Electrical Communication Engineering, Indian Institute of Technology Kharagpur, India-721302", "fullName": "Somnath Sengupta", "givenName": "Somnath", "surname": "Sengupta", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2011", "issn": "1945-7871", "isbn": "978-1-61284-348-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06012030", "articleId": "12OmNA0vo1u", "__typename": "AdjacentArticleType" }, "next": { "fno": "06012254", "articleId": "12OmNxVlTFn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/csie/2009/3507/6/3507f312", "title": "An Improved Side Information Refinement Method in Distributed Video Coding", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507f312/12OmNAQrYFu", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284612", "title": "An Iterative Refinement Technique for Side Information Generation in DVC", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284612/12OmNCmGNOw", "parentPublication": { "id": 
"proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2008/2570/0/04607541", "title": "Wyner-Ziv video coding with block classification", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607541/12OmNvT2oZo", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2016/1853/0/07786164", "title": "A Reconstruction Algorithm with Multiple Side Information for Distributed Compression of Sparse Sources", "doi": null, "abstractUrl": "/proceedings-article/dcc/2016/07786164/12OmNwD1pZ9", "parentPublication": { "id": "proceedings/dcc/2016/1853/0", "title": "2016 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06012078", "title": "An improved block matching and prediction algorithm for multi-view video with distributed video codec", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012078/12OmNwMFMhJ", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2007/1834/0/04458187", "title": "Side Information Generation Using Extra Information in Distributed Video Coding", "doi": null, "abstractUrl": "/proceedings-article/isspit/2007/04458187/12OmNwx3Q9Z", "parentPublication": { "id": "proceedings/isspit/2007/1834/0", "title": "2007 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2011/279/0/05749533", "title": "An 
Efficient Distributed Video Coding with Parallelized Design for Concurrent Computing", "doi": null, "abstractUrl": "/proceedings-article/dcc/2011/05749533/12OmNwx3QaE", "parentPublication": { "id": "proceedings/dcc/2011/279/0", "title": "2011 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2012/2027/0/06266225", "title": "Improving the Rate-Distortion Performance of the Transform Domain Refinement Codec by the Use of Decoder-Driven Adaptive Modes", "doi": null, "abstractUrl": "/proceedings-article/icmew/2012/06266225/12OmNyQ7FWY", "parentPublication": { "id": "proceedings/icmew/2012/2027/0", "title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2015/0379/0/0379a327", "title": "Scalable Saliency-Aware Distributed Compressive Video Sensing", "doi": null, "abstractUrl": "/proceedings-article/ism/2015/0379a327/12OmNyqiaPN", "parentPublication": { "id": "proceedings/ism/2015/0379/0", "title": "2015 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2014/04/mmu2014040084", "title": "Context-Adaptive Modeling for Wavelet-Domain Distributed Video Coding", "doi": null, "abstractUrl": "/magazine/mu/2014/04/mmu2014040084/13rRUB7a18k", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirr", "title": "Computer Science and Software Engineering, International Conference on", "acronym": "csse", "groupId": "1002553", "volume": "1", "displayVolume": "1", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNqI04PU", "doi": "10.1109/CSSE.2008.448", "title": "Faster Progressive Wavelet Coding of Images", "normalizedTitle": "Faster Progressive Wavelet Coding of Images", "abstract": "Progressive Wavelet Coding of Images (PWC) is a simple and efficient image coding algorithm. It is comparable to the state-of-the-art SPIHT. It is based only on two concepts: (1) data-independent reordering and blocking, and (2) low-complexity independent encoding of each block via adaptive Run-Length/Rice coding of the bit planes. Therefore PWC allows for progressive image encoding that is scalable both in resolution and bit rate. But its reordering and un-reordering algorithms are very complex and computation speed is limited, so this paper proposes a new reordering and un-reordering algorithm to improve significantly the encoding and decoding speed of PWC, making PWC have wide applications.", "abstracts": [ { "abstractType": "Regular", "content": "Progressive Wavelet Coding of Images (PWC) is a simple and efficient image coding algorithm. It is comparable to the state-of-the-art SPIHT. It is based only on two concepts: (1) data-independent reordering and blocking, and (2) low-complexity independent encoding of each block via adaptive Run-Length/Rice coding of the bit planes. Therefore PWC allows for progressive image encoding that is scalable both in resolution and bit rate. 
But its reordering and un-reordering algorithms are very complex and computation speed is limited, so this paper proposes a new reordering and un-reordering algorithm to improve significantly the encoding and decoding speed of PWC, making PWC have wide applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Progressive Wavelet Coding of Images (PWC) is a simple and efficient image coding algorithm. It is comparable to the state-of-the-art SPIHT. It is based only on two concepts: (1) data-independent reordering and blocking, and (2) low-complexity independent encoding of each block via adaptive Run-Length/Rice coding of the bit planes. Therefore PWC allows for progressive image encoding that is scalable both in resolution and bit rate. But its reordering and un-reordering algorithms are very complex and computation speed is limited, so this paper proposes a new reordering and un-reordering algorithm to improve significantly the encoding and decoding speed of PWC, making PWC have wide applications.", "fno": "3336a188", "keywords": [ "Progressive Wavelet Coding Of Images", "Reorder", "Unreorder", "Encoding Speed", "Decoding Speed" ], "authors": [ { "affiliation": null, "fullName": "Cuixiang Zhong", "givenName": "Cuixiang", "surname": "Zhong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Minghe Huang", "givenName": "Minghe", "surname": "Huang", "__typename": "ArticleAuthorType" } ], "idPrefix": "csse", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "188-191", "year": "2008", "issn": null, "isbn": "978-0-7695-3336-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3336a184", "articleId": "12OmNrAMESE", "__typename": "AdjacentArticleType" }, "next": { "fno": "3336a192", "articleId": "12OmNAkWvFr", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dcc/1996/7358/0/73580406", "title": "Quadtree-guided wavelet image coding", "doi": null, "abstractUrl": "/proceedings-article/dcc/1996/73580406/12OmNA0vnN5", "parentPublication": { "id": "proceedings/dcc/1996/7358/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/1999/0096/0/00960336", "title": "Fast Progressive Wavelet Coding", "doi": null, "abstractUrl": "/proceedings-article/dcc/1999/00960336/12OmNAk5HM4", "parentPublication": { "id": "proceedings/dcc/1999/0096/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2008/3121/0/3121a530", "title": "Improved Wavelet-Based Embedded Image Coding Using a Dynamic Index Reordering Vector Quantizer", "doi": null, "abstractUrl": "/proceedings-article/dcc/2008/3121a530/12OmNBDyAbw", "parentPublication": { "id": "proceedings/dcc/2008/3121/0", "title": "2008 Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2002/1477/0/14770362", "title": "Progressive Coding of Palette Images and Digital Maps", "doi": null, "abstractUrl": "/proceedings-article/dcc/2002/14770362/12OmNBEGYKg", "parentPublication": { "id": "proceedings/dcc/2002/1477/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2011/4501/0/4501a003", "title": "Progressive Wavelet Coding of Images Using Linear Indexing Technique", "doi": null, "abstractUrl": "/proceedings-article/iccis/2011/4501a003/12OmNBZHigh", "parentPublication": { "id": "proceedings/iccis/2011/4501/0", "title": "2011 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/1/81831572", "title": "Progressive image indexing and retrieval based on embedded wavelet coding", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81831572/12OmNBpEeIM", "parentPublication": { "id": "proceedings/icip/1997/8183/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfcc/2009/3591/0/3591a182", "title": "Performance Evaluation of Shape Adaptive Discrete Wavelet Transform Based Magnetic Resonance Images Coding", "doi": null, "abstractUrl": "/proceedings-article/icfcc/2009/3591a182/12OmNwCJON1", "parentPublication": { "id": "proceedings/icfcc/2009/3591/0", "title": "2009 International Conference on Future Computer and Communication (ICFCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/1/81831053", "title": "Progressive image coding with spatially variable resolution", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81831053/12OmNwIHosB", "parentPublication": { "id": "proceedings/icip/1997/8183/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/3/81833575", "title": "Progressive transmission of images using block wavelet transform", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81833575/12OmNxT56CD", "parentPublication": { "id": "proceedings/icip/1997/8183/3", "title": "Proceedings of International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2000/0592/0/05920243", "title": "Fast Progressive Image Coding without Wavelets", "doi": null, "abstractUrl": "/proceedings-article/dcc/2000/05920243/12OmNyshmJ2", "parentPublication": { "id": 
"proceedings/dcc/2000/0592/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAH5djZ", "title": "2016 12th International Conference on Mobile Ad-Hoc and Sensor Networks (MSN)", "acronym": "msn", "groupId": "1002549", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNx8wTuZ", "doi": "10.1109/MSN.2016.068", "title": "Visual Secret Sharing Scheme with (k, n) Threshold Based on QR Codes", "normalizedTitle": "Visual Secret Sharing Scheme with (k, n) Threshold Based on QR Codes", "abstract": "In this paper, a novel visual secret sharing (VSS) scheme with using QR codes is investigated. The proposed visual secret sharing scheme based on QR codes(VSSQR) can visually reveal secret image by stacking k or more shares (shadow images) from all the n QR codes as well as scan the QR code by a QR code reader. Our VSSQR exploits the error correction mechanism in the QR code structure, to embed the bits corresponding to shares generated by VSS from a secret bit into the same locations of QR codes in the processing of encoding QR. Each output share is a valid QR code, which may reduce the likelihood of attracting the attention of potential attackers, that can be scanned and decoded utilizing a QR code reader. The secret image can be recovered by stacking for case (k, n) based on the human visual system without any computation. In addition, it can assist alignment for VSS recovery. The experiment results show the effectiveness of our scheme.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, a novel visual secret sharing (VSS) scheme with using QR codes is investigated. The proposed visual secret sharing scheme based on QR codes(VSSQR) can visually reveal secret image by stacking k or more shares (shadow images) from all the n QR codes as well as scan the QR code by a QR code reader. 
Our VSSQR exploits the error correction mechanism in the QR code structure, to embed the bits corresponding to shares generated by VSS from a secret bit into the same locations of QR codes in the processing of encoding QR. Each output share is a valid QR code, which may reduce the likelihood of attracting the attention of potential attackers, that can be scanned and decoded utilizing a QR code reader. The secret image can be recovered by stacking for case (k, n) based on the human visual system without any computation. In addition, it can assist alignment for VSS recovery. The experiment results show the effectiveness of our scheme.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, a novel visual secret sharing (VSS) scheme with using QR codes is investigated. The proposed visual secret sharing scheme based on QR codes(VSSQR) can visually reveal secret image by stacking k or more shares (shadow images) from all the n QR codes as well as scan the QR code by a QR code reader. Our VSSQR exploits the error correction mechanism in the QR code structure, to embed the bits corresponding to shares generated by VSS from a secret bit into the same locations of QR codes in the processing of encoding QR. Each output share is a valid QR code, which may reduce the likelihood of attracting the attention of potential attackers, that can be scanned and decoded utilizing a QR code reader. The secret image can be recovered by stacking for case (k, n) based on the human visual system without any computation. In addition, it can assist alignment for VSS recovery. 
The experiment results show the effectiveness of our scheme.", "fno": "07950262", "keywords": [ "Cryptography", "Image Coding", "QR Codes", "Visual Secret Sharing Scheme", "QR Codes", "Secret Image", "Cryptography", "Visualization", "Encoding", "Error Correction Codes", "Stacking", "Visual Systems", "Image Coding", "Visual Secret Sharing", "Visual Cryptography Application", "QR Code", "Error Correction" ], "authors": [ { "affiliation": null, "fullName": "Song Wan", "givenName": "Song", "surname": "Wan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yuliang Lu", "givenName": "Yuliang", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xuehu Yan", "givenName": "Xuehu", "surname": "Yan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Lintao Liu", "givenName": "Lintao", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "msn", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-12-01T00:00:00", "pubType": "proceedings", "pages": "374-379", "year": "2016", "issn": null, "isbn": "978-1-5090-5696-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07950261", "articleId": "12OmNBtUdH0", "__typename": "AdjacentArticleType" }, "next": { "fno": "07950263", "articleId": "12OmNxeutbS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ewdts/2017/3299/0/08110079", "title": "Controlling programmable microcontrollers via QR codes", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2017/08110079/12OmNBSjJ02", "parentPublication": { "id": "proceedings/ewdts/2017/3299/0", "title": "2017 IEEE East-West Design & Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2013/3211/0/3211a022", 
"title": "Secret Hiding Mechanism Using QR Barcode", "doi": null, "abstractUrl": "/proceedings-article/sitis/2013/3211a022/12OmNvEhg2p", "parentPublication": { "id": "proceedings/sitis/2013/3211/0", "title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2016/1552/0/07574744", "title": "QR code steganography with secret payload enhancement", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574744/12OmNySosIF", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2011/4517/0/4517a089", "title": "Offline QR Code Authorization Based on Visual Cryptography", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2011/4517a089/12OmNzWfoWb", "parentPublication": { "id": "proceedings/iih-msp/2011/4517/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/09/08632711", "title": "Micrography QR Codes", "doi": null, "abstractUrl": "/journal/tg/2020/09/08632711/17D45WB0qbr", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2018/9182/0/918200a188", "title": "A Construction of Fake QR Codes Based on Error-Correcting Codes", "doi": null, "abstractUrl": "/proceedings-article/candar/2018/918200a188/17D45WXIkBc", "parentPublication": { "id": "proceedings/candar/2018/9182/0", "title": "2018 Sixth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2018/0169/0/016900a233", "title": "Improved Extended Polynomial-Based Secret Image Sharing Scheme Using QR Code", "doi": null, "abstractUrl": "/proceedings-article/cis/2018/016900a233/17D45WnnFWQ", "parentPublication": { "id": "proceedings/cis/2018/0169/0", "title": "2018 14th International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigdatasecurity-hpsc-ids/2017/6296/0/07980322", "title": "Secret Image Sharing Based on Error-Correcting Codes", "doi": null, "abstractUrl": "/proceedings-article/bigdatasecurity-hpsc-ids/2017/07980322/17D45XDIXOJ", "parentPublication": { "id": "proceedings/bigdatasecurity-hpsc-ids/2017/6296/0", "title": "2017 IEEE 3rd International Conference on Big Data Security on Cloud (BigDataSecurity), IEEE International Conference on High Performance and Smart Computing, (HPSC) and IEEE International Conference on Intelligent Data and Security (IDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2018/7744/0/774400a053", "title": "Applying QR Code to Secure Medical Management", "doi": null, "abstractUrl": "/proceedings-article/itme/2018/774400a053/17D45Xh13tD", "parentPublication": { "id": "proceedings/itme/2018/7744/0", "title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900c277", "title": "ArtCoder: An End-to-end Method for Generating Scanning-robust Stylized QR Codes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900c277/1yeLF0pZaH6", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBNM8Mg", "title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "acronym": "sitis", "groupId": "1002425", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNxdDFDH", "doi": "10.1109/SITIS.2015.42", "title": "Development of Color QR Code for Increasing Capacity", "normalizedTitle": "Development of Color QR Code for Increasing Capacity", "abstract": "Barcodes have been widely popular. Their popularity has encouraged an ongoing invention of decoding methods. Barcodes can be categorized into 2 main groups, namely one-dimension (1D) barcodes at which information is stored horizontally and two-dimension (2D) barcodes which contain information in both vertical and horizontal direction, promising a higher storage capacity compared to 1D barcodes. Despite high data density, an amount of information obtained in 2D barcodes still limited to some extent. This study selected QR Code (Quick Response Code) is a type of 2D barcode because firstly, it can handle a variety of information. Secondly, decoding is reasonably straightforward. Finally, the structure of QR code is specified clearly by its developer. This research aimed to increase QR Code capacity by proposing a color Quick Response Code (color QR code) encoding concept which can hold a larger amount of information than that of the traditional black and white QR Code regarding their physical size. A two-color (black and white) QR Code can store 1 bit in each module only, whereas a module of a color QR code with sixteen different colors can contain 4-bit data. 
In order to decode a color QR code, this study used a code reader equipped with at least an 8-megapixel camera and a decoding application was developed on Android (Android application on mobile phone) and Java (Java application on PC) platform.", "abstracts": [ { "abstractType": "Regular", "content": "Barcodes have been widely popular. Their popularity has encouraged an ongoing invention of decoding methods. Barcodes can be categorized into 2 main groups, namely one-dimension (1D) barcodes at which information is stored horizontally and two-dimension (2D) barcodes which contain information in both vertical and horizontal direction, promising a higher storage capacity compared to 1D barcodes. Despite high data density, an amount of information obtained in 2D barcodes still limited to some extent. This study selected QR Code (Quick Response Code) is a type of 2D barcode because firstly, it can handle a variety of information. Secondly, decoding is reasonably straightforward. Finally, the structure of QR code is specified clearly by its developer. This research aimed to increase QR Code capacity by proposing a color Quick Response Code (color QR code) encoding concept which can hold a larger amount of information than that of the traditional black and white QR Code regarding their physical size. A two-color (black and white) QR Code can store 1 bit in each module only, whereas a module of a color QR code with sixteen different colors can contain 4-bit data. In order to decode a color QR code, this study used a code reader equipped with at least an 8-megapixel camera and a decoding application was developed on Android (Android application on mobile phone) and Java (Java application on PC) platform.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Barcodes have been widely popular. Their popularity has encouraged an ongoing invention of decoding methods. 
Barcodes can be categorized into 2 main groups, namely one-dimension (1D) barcodes at which information is stored horizontally and two-dimension (2D) barcodes which contain information in both vertical and horizontal direction, promising a higher storage capacity compared to 1D barcodes. Despite high data density, an amount of information obtained in 2D barcodes still limited to some extent. This study selected QR Code (Quick Response Code) is a type of 2D barcode because firstly, it can handle a variety of information. Secondly, decoding is reasonably straightforward. Finally, the structure of QR code is specified clearly by its developer. This research aimed to increase QR Code capacity by proposing a color Quick Response Code (color QR code) encoding concept which can hold a larger amount of information than that of the traditional black and white QR Code regarding their physical size. A two-color (black and white) QR Code can store 1 bit in each module only, whereas a module of a color QR code with sixteen different colors can contain 4-bit data. 
In order to decode a color QR code, this study used a code reader equipped with at least an 8-megapixel camera and a decoding application was developed on Android (Android application on mobile phone) and Java (Java application on PC) platform.", "fno": "9721a645", "keywords": [ "Image Color Analysis", "Decoding", "Encoding", "Standards", "Cameras", "Java", "Image Coding", "HSV Color Model", "QR Code", "Two Dimensional Barcode", "QR Code", "Color QR Code", "Quick Response Code" ], "authors": [ { "affiliation": null, "fullName": "Nutchanad Taveerad", "givenName": "Nutchanad", "surname": "Taveerad", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sartid Vongpradhip", "givenName": "Sartid", "surname": "Vongpradhip", "__typename": "ArticleAuthorType" } ], "idPrefix": "sitis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-11-01T00:00:00", "pubType": "proceedings", "pages": "645-648", "year": "2015", "issn": null, "isbn": "978-1-4673-9721-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9721a639", "articleId": "12OmNCesr4T", "__typename": "AdjacentArticleType" }, "next": { "fno": "9721a649", "articleId": "12OmNz5JCfV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icoit/2016/3584/0/07966807", "title": "An Introduction to QR Code Technology", "doi": null, "abstractUrl": "/proceedings-article/icoit/2016/07966807/12OmNqzu6Vk", "parentPublication": { "id": "proceedings/icoit/2016/3584/0", "title": "2016 International Conference on Information Technology (ICIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgi/2008/3275/0/3275a050", "title": "Contextual QR Codes", "doi": null, "abstractUrl": "/proceedings-article/iccgi/2008/3275a050/12OmNrkjVjF", "parentPublication": { "id": 
"proceedings/iccgi/2008/3275/0", "title": "Computing in the Global Information Technology, International Multi-Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2014/6854/0/6854a154", "title": "An Improved Algorithm for QR Code Image Binarization", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a154/12OmNwlZu3Q", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130335", "title": "A theory of color barcodes", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130335/12OmNyQ7FOF", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3pgcic/2010/4237/0/4237a454", "title": "Image Processing of Dotted Picture in the QR Code of Cellular Phone", "doi": null, "abstractUrl": "/proceedings-article/3pgcic/2010/4237a454/12OmNzvhvBt", "parentPublication": { "id": "proceedings/3pgcic/2010/4237/0", "title": "P2P, Parallel, Grid, Cloud, and Internet Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2018/4195/0/08551539", "title": "Integration of Graphic QR code and Identity Documents by Laser Perforation to Enhance Anti-Counterfeiting Features", "doi": null, "abstractUrl": "/proceedings-article/icmew/2018/08551539/17D45W1Oa5H", "parentPublication": { "id": "proceedings/icmew/2018/4195/0", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2020/09/08632711", "title": "Micrography QR Codes", "doi": null, "abstractUrl": "/journal/tg/2020/09/08632711/17D45WB0qbr", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2018/6956/0/695600a107", "title": "Research on the Optimal Threshold of QR Code Recognition Based on Maximum Likelihood Criterion", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2018/695600a107/1dUo3knn0VW", "parentPublication": { "id": "proceedings/icnisc/2018/6956/0", "title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09105961", "title": "Deep Restoration of Invisible QR Code from TPVM Display", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09105961/1kwqEjp5irC", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2021/4254/0/425400a349", "title": "A Robust video watermarking approach based on QR code", "doi": null, "abstractUrl": "/proceedings-article/iccst/2021/425400a349/1ziPi4jFHRm", "parentPublication": { "id": "proceedings/iccst/2021/4254/0", "title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19wAWB56iE8", "title": "2018 International Conference on Control, Artificial Intelligence, Robotics & Optimization (ICCAIRO)", "acronym": "iccairo", "groupId": "1823864", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19wAXxFz4bK", "doi": "10.1109/ICCAIRO.2018.00047", "title": "Deep Convolutional Encoder-Decoder Architecture for Neuronal Structure Segmentation", "normalizedTitle": "Deep Convolutional Encoder-Decoder Architecture for Neuronal Structure Segmentation", "abstract": "Electro microscopic connectomics is a practical application of research direction. It determines whether a nerve is damaged by judging the connectivity of nerves. However, the formidable size of Electro Microscopic (EM) image data generated by serial-section Transmitted Electron Microscopy (ssTEM) severely depends on human annotation, which is impractical. One of the main challenges in connectomics research is to take minimal user intervention into account during neuronal structures automatic segmentation. To address this problem, a network is constructed to segment neuronal structures automatically, which expands receptive field of feature maps. Besides, we also introduce data augmentation method to use the available training data more efficiently. Our model is proposed based on a context network, and its architecture consists of an encoding path that enables feature extraction. The novel introduction of summation-based skip connection is aimed to connect decoding path with encoding path. Finally, real experiments with ISBI EM dataset validate the approach.", "abstracts": [ { "abstractType": "Regular", "content": "Electro microscopic connectomics is a practical application of research direction. It determines whether a nerve is damaged by judging the connectivity of nerves. 
However, the formidable size of Electro Microscopic (EM) image data generated by serial-section Transmitted Electron Microscopy (ssTEM) severely depends on human annotation, which is impractical. One of the main challenges in connectomics research is to take minimal user intervention into account during neuronal structures automatic segmentation. To address this problem, a network is constructed to segment neuronal structures automatically, which expands receptive field of feature maps. Besides, we also introduce data augmentation method to use the available training data more efficiently. Our model is proposed based on a context network, and its architecture consists of an encoding path that enables feature extraction. The novel introduction of summation-based skip connection is aimed to connect decoding path with encoding path. Finally, real experiments with ISBI EM dataset validate the approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Electro microscopic connectomics is a practical application of research direction. It determines whether a nerve is damaged by judging the connectivity of nerves. However, the formidable size of Electro Microscopic (EM) image data generated by serial-section Transmitted Electron Microscopy (ssTEM) severely depends on human annotation, which is impractical. One of the main challenges in connectomics research is to take minimal user intervention into account during neuronal structures automatic segmentation. To address this problem, a network is constructed to segment neuronal structures automatically, which expands receptive field of feature maps. Besides, we also introduce data augmentation method to use the available training data more efficiently. Our model is proposed based on a context network, and its architecture consists of an encoding path that enables feature extraction. The novel introduction of summation-based skip connection is aimed to connect decoding path with encoding path. 
Finally, real experiments with ISBI EM dataset validate the approach.", "fno": "08698405", "keywords": [ "Image Segmentation", "Encoding", "Deep Learning", "Task Analysis", "Computer Architecture", "Decoding", "Network Architecture", "Connectomics", "Semantic Segmentation", "CNN", "Deep Learning" ], "authors": [ { "affiliation": "East China Normal University, School of computer Science and Software Engineering, Shanghai, China", "fullName": "Qingqing Cui", "givenName": "Qingqing", "surname": "Cui", "__typename": "ArticleAuthorType" }, { "affiliation": "East China Normal University, School of computer Science and Software Engineering, Shanghai, China", "fullName": "Peng Pu", "givenName": "Peng", "surname": "Pu", "__typename": "ArticleAuthorType" }, { "affiliation": "East China Normal University, School of computer Science and Software Engineering, Shanghai, China", "fullName": "Lu Chen", "givenName": "Lu", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "East China Normal University, School of computer Science and Software Engineering, Shanghai, China", "fullName": "Wenzheng Zhao", "givenName": "Wenzheng", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "East China Normal University, School of computer Science and Software Engineering, Shanghai, China", "fullName": "Yu Liu", "givenName": "Yu", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccairo", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-05-01T00:00:00", "pubType": "proceedings", "pages": "242-247", "year": "2018", "issn": null, "isbn": "978-1-5386-9576-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08698423", "articleId": "19wAZnnkEG4", "__typename": "AdjacentArticleType" }, "next": { "fno": "08698422", "articleId": "19wAYSNcbN6", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032c410", "title": "Multi-stage Multi-recursive-input Fully Convolutional Networks for Neuronal Boundary Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c410/12OmNB8kHOV", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851a193", "title": "Object Contour Detection with a Fully Convolutional Encoder-Decoder Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851a193/12OmNBKmXdO", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2017/2939/0/08078547", "title": "Background subtraction using encoder-decoder structured convolutional neural network", "doi": null, "abstractUrl": "/proceedings-article/avss/2017/08078547/12OmNx57HKN", "parentPublication": { "id": "proceedings/avss/2017/2939/0", "title": "2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2017/3586/1/3586a708", "title": "Using Convolutional Encoder-Decoder for Document Image Binarization", "doi": null, "abstractUrl": "/proceedings-article/icdar/2017/3586a708/12OmNzn38XW", "parentPublication": { "id": "proceedings/icdar/2017/3586/1", "title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/12/07803544", "title": "SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image 
Segmentation", "doi": null, "abstractUrl": "/journal/tp/2017/12/07803544/13rRUwjXZL2", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2015/09/06882242", "title": "Efficient hardware implementation of encoder and decoder for golay code", "doi": null, "abstractUrl": "/journal/si/2015/09/06882242/13rRUygBw55", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nas/2018/8367/0/08515696", "title": "Personalized Behavior Prediction with Encoder-to-Decoder Structure", "doi": null, "abstractUrl": "/proceedings-article/nas/2018/08515696/17D45WaTkc7", "parentPublication": { "id": "proceedings/nas/2018/8367/0", "title": "2018 IEEE International Conference on Networking, Architecture and Storage (NAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000j145", "title": "Light Field Intrinsics with a Deep Encoder-Decoder Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000j145/17D45WaTkeO", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d688", "title": "EyeNet: Attention Based Convolutional Encoder-Decoder Network for Eye Region Segmentation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d688/1i5msUQsh1u", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413286", "title": "Encoder-Decoder Based Convolutional Neural Networks with Multi-Scale-Aware Modules for Crowd Counting", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413286/1tmiGAPXwxG", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G55WEFExd6", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G578ylCePe", "doi": "10.1109/CVPRW56347.2022.00190", "title": "Super-Resolution based Video Coding Scheme", "normalizedTitle": "Super-Resolution based Video Coding Scheme", "abstract": "In this paper, we present a super-resolution-based video coding scheme that compresses video data by combining traditional hybrid video coding and Convolutional neural network-based video coding. During video encoding, downsampling reduces the resolution of an original video in both horizontal and vertical directions to reduce original video data, and Convolutional neural networkbased super-resolution is employed after the decoding process to recover the resolution of the reconstructed video during upsampling. For core encoding and decoding processes, the latest video coding standard (i.e., VVC/H.266) is conducted. The experimental results show that the proposed method can provide efficient coding performance while maintaining good visual quality.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a super-resolution-based video coding scheme that compresses video data by combining traditional hybrid video coding and Convolutional neural network-based video coding. During video encoding, downsampling reduces the resolution of an original video in both horizontal and vertical directions to reduce original video data, and Convolutional neural networkbased super-resolution is employed after the decoding process to recover the resolution of the reconstructed video during upsampling. For core encoding and decoding processes, the latest video coding standard (i.e., VVC/H.266) is conducted. 
The experimental results show that the proposed method can provide efficient coding performance while maintaining good visual quality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a super-resolution-based video coding scheme that compresses video data by combining traditional hybrid video coding and Convolutional neural network-based video coding. During video encoding, downsampling reduces the resolution of an original video in both horizontal and vertical directions to reduce original video data, and Convolutional neural networkbased super-resolution is employed after the decoding process to recover the resolution of the reconstructed video during upsampling. For core encoding and decoding processes, the latest video coding standard (i.e., VVC/H.266) is conducted. The experimental results show that the proposed method can provide efficient coding performance while maintaining good visual quality.", "fno": "873900b777", "keywords": [ "Image Reconstruction", "Image Resolution", "Image Sampling", "Video Coding", "Super Resolution Based Video Coding Scheme", "Video Encoding", "Video Data", "Convolutional Neural Network Based Super Resolution", "Reconstructed Video", "Core Encoding", "Decoding Process", "Video Coding Standard", "Convolutional Neural Network Based Video Coding", "Hybrid Video Coding", "Video Coding", "Visualization", "Image Coding", "Superresolution", "Encoding", "Decoding", "Pattern Recognition" ], "authors": [ { "affiliation": "Gacheon University,School of Computing,Seongnam-si,Korea", "fullName": "Hyun Min Cho", "givenName": "Hyun Min", "surname": "Cho", "__typename": "ArticleAuthorType" }, { "affiliation": "Gacheon University,School of Computing,Seongnam-si,Korea", "fullName": "Kiho Choi", "givenName": "Kiho", "surname": "Choi", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": 
"2022-06-01T00:00:00", "pubType": "proceedings", "pages": "1777-1779", "year": "2022", "issn": null, "isbn": "978-1-6654-8739-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "873900b773", "articleId": "1G578ULVvVe", "__typename": "AdjacentArticleType" }, "next": { "fno": "873900b780", "articleId": "1G56g11jeec", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2011/348/0/06011968", "title": "Decoder driven multi resolution side information refinements and mode decisions for improved rate-distortion performance in distributed video coding", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011968/12OmNBpEeQP", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2016/1853/0/07786154", "title": "Adaptive Motion Vector Resolution Scheme for Enhanced Video Coding", "doi": null, "abstractUrl": "/proceedings-article/dcc/2016/07786154/12OmNrH1PFP", "parentPublication": { "id": "proceedings/dcc/2016/1853/0", "title": "2016 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/2/00413495", "title": "Temporal resolution scalable video coding", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413495/12OmNy3AgEL", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/208P2B07", "title": "Geometry constrained sparse coding for single image super-resolution", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2012/208P2B07/12OmNywxlNB", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200e501", "title": "Efficient Video Compression via Content-Adaptive Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200e501/1BmGANYOwms", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2022/7218/0/09859373", "title": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859373/1G4F0ndbVoQ", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600r7803", "title": "Reference-based Video Super-Resolution Using Multi-Camera Video Triplets", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600r7803/1H1l60QEy64", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2022/5478/0/547800a291", "title": "3D Deformable Kernels for Video super-resolution", "doi": null, "abstractUrl": "/proceedings-article/icdh/2022/547800a291/1JeDsccBp04", "parentPublication": { "id": "proceedings/icdh/2022/5478/0", "title": "2022 9th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b513", "title": "An Efficient Network Design for Face Video Super-resolution", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b513/1yNii0RK4eI", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900c480", "title": "EVSRNet: Efficient Video Super-Resolution with Neural Architecture Search", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900c480/1yVA6c7U74Y", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrF2DIa", "title": "2017 21st International Conference Information Visualisation (IV)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNBTs7ow", "doi": "10.1109/iV.2017.37", "title": "Architecture Proposal for Data Extraction of Chart Images Using Convolutional Neural Network", "normalizedTitle": "Architecture Proposal for Data Extraction of Chart Images Using Convolutional Neural Network", "abstract": "Different information visualization techniques can be found in the literature due to the quantity and variety of data stored in computational systems. In this context, the classification of chart images becomes important because it allows various types of graphs to be detected automatically in different contexts, allowing a more specific processing for each type of visualization, for example, data extraction. Several techniques of image classification can be used, where the most common are based on the extraction of features of the images, and a later classification using these features. However, one technique that has been gaining prominence in the context of image classification is the Convolutional Neural Network (CNN). This technique is based on deep learning and, in a way, encapsulates the feature extraction process. In this way, the proposal of this article is to use an architecture of a client-server based model to do the chart image classification and later data extraction from this image. The main advantage is doing the CNN processing on the server side, so the application does not rely on client device limitations. For this, an image dataset was generated from the web, and it has ten classes of graphs. 
From the experiments done, it was seen that the use of this technique was feasible, and modifications in the architecture can be made as a proposal to improve the accuracy of the model.", "abstracts": [ { "abstractType": "Regular", "content": "Different information visualization techniques can be found in the literature due to the quantity and variety of data stored in computational systems. In this context, the classification of chart images becomes important because it allows various types of graphs to be detected automatically in different contexts, allowing a more specific processing for each type of visualization, for example, data extraction. Several techniques of image classification can be used, where the most common are based on the extraction of features of the images, and a later classification using these features. However, one technique that has been gaining prominence in the context of image classification is the Convolutional Neural Network (CNN). This technique is based on deep learning and, in a way, encapsulates the feature extraction process. In this way, the proposal of this article is to use an architecture of a client-server based model to do the chart image classification and later data extraction from this image. The main advantage is doing the CNN processing on the server side, so the application does not rely on client device limitations. For this, an image dataset was generated from the web, and it has ten classes of graphs. From the experiments done, it was seen that the use of this technique was feasible, and modifications in the architecture can be made as a proposal to improve the accuracy of the model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Different information visualization techniques can be found in the literature due to the quantity and variety of data stored in computational systems. 
In this context, the classification of chart images becomes important because it allows various types of graphs to be detected automatically in different contexts, allowing a more specific processing for each type of visualization, for example, data extraction. Several techniques of image classification can be used, where the most common are based on the extraction of features of the images, and a later classification using these features. However, one technique that has been gaining prominence in the context of image classification is the Convolutional Neural Network (CNN). This technique is based on deep learning and, in a way, encapsulates the feature extraction process. In this way, the proposal of this article is to use an architecture of a client-server based model to do the chart image classification and later data extraction from this image. The main advantage is doing the CNN processing on the server side, so the application does not rely on client device limitations. For this, an image dataset was generated from the web, and it has ten classes of graphs. From the experiments done, it was seen that the use of this technique was feasible, and modifications in the architecture can be made as a proposal to improve the accuracy of the model.", "fno": "0831a318", "keywords": [ "Data Visualisation", "Feature Extraction", "Image Classification", "Learning Artificial Intelligence", "Neural Nets", "Chart Image Classification", "Data Extraction", "CNN Processing", "Image Dataset", "Convolutional Neural Network", "Information Visualization", "Feature Extraction", "Deep Learning", "Feature Extraction", "Data Mining", "Data Visualization", "Servers", "Computer Architecture", "Image Classification", "Chart Image Classification", "Data Extraction", "Convolutional Neural Network" ], "authors": [ { "affiliation": "Univ. Fed. 
do Para, Belém, Brazil", "fullName": "Paulo Chagas", "givenName": "Paulo", "surname": "Chagas", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Fed. do Para, Belém, Brazil", "fullName": "Alexandre Freitas", "givenName": "Alexandre", "surname": "Freitas", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Fed. do Para, Belém, Brazil", "fullName": "Rafael Daisuke", "givenName": "Rafael", "surname": "Daisuke", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Fed. do Para, Belém, Brazil", "fullName": "Brunelli Miranda", "givenName": "Brunelli", "surname": "Miranda", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Fed. do Para, Belém, Brazil", "fullName": "Tiago Davi Oliveira De Araújo", "givenName": "Tiago Davi Oliveira De", "surname": "Araújo", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Fed. do Para, Belém, Brazil", "fullName": "Carlos Santos", "givenName": "Carlos", "surname": "Santos", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Fed. do Para, Belém, Brazil", "fullName": "Bianchi Meiguins", "givenName": "Bianchi", "surname": "Meiguins", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. Fed. 
do Para, Belém, Brazil", "fullName": "Jefferson Magalhães De Morais", "givenName": "Jefferson Magalhães De", "surname": "Morais", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-07-01T00:00:00", "pubType": "proceedings", "pages": "318-323", "year": "2017", "issn": "2375-0138", "isbn": "978-1-5386-0831-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0831a312", "articleId": "12OmNBZYTs2", "__typename": "AdjacentArticleType" }, "next": { "fno": "0831a324", "articleId": "12OmNqJ8toe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdar/2015/1805/0/07333872", "title": "Chart classification by combining deep convolutional networks and deep belief networks", "doi": null, "abstractUrl": "/proceedings-article/icdar/2015/07333872/12OmNwt5shU", "parentPublication": { "id": "proceedings/icdar/2015/1805/0", "title": "2015 13th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2007/2822/1/28220521", "title": "Extraction of Vectorized Graphical Information from Scientific Chart Images", "doi": null, "abstractUrl": "/proceedings-article/icdar/2007/28220521/12OmNyq0zMB", "parentPublication": { "id": "proceedings/icdar/2007/2822/1", "title": "Ninth International Conference on Document Analysis and Recognition (ICDAR 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2018/7202/0/720200a128", "title": "Synthetic Chart Image Generator: An Application for Generating Chart Image Datasets", "doi": null, "abstractUrl": "/proceedings-article/iv/2018/720200a128/17D45X0yjUm", "parentPublication": { "id": "proceedings/iv/2018/7202/0", 
"title": "2018 22nd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2018/7447/0/744701a970", "title": "Two Strategies for Bag-of-Visual Words Feature Extraction", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2018/744701a970/19m3HJBKauk", "parentPublication": { "id": "proceedings/iiai-aai/2018/7447/0", "title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500c553", "title": "Parsing Line Chart Images Using Linear Programming", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500c553/1B13SlHr1UQ", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956289", "title": "ICPR 2022: Challenge on Harvesting Raw Tables from Infographics (CHART-Infographics)", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956289/1IHpRwfpyHC", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2019/3014/0/301400b594", "title": "ICDAR 2019 Competition on Harvesting Raw Tables from Infographics (CHART-Infographics)", "doi": null, "abstractUrl": "/proceedings-article/icdar/2019/301400b594/1h81uWriJLa", "parentPublication": { "id": "proceedings/icdar/2019/3014/0", "title": "2019 International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/11/09085944", "title": "Chart Mining: 
A Survey of Methods for Automated Chart Analysis", "doi": null, "abstractUrl": "/journal/tp/2021/11/09085944/1jE1Hu1xUzu", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09293003", "title": "Chartem: Reviving Chart Images with Data Embedding", "doi": null, "abstractUrl": "/journal/tg/2021/02/09293003/1pyonCyir8k", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412153", "title": "Visual Style Extraction from Chart Images for Chart Restyling", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412153/1tmiHY12xy0", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxwENvc", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "acronym": "icassp", "groupId": "1000002", "volume": "3", "displayVolume": "3", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNCyTys9", "doi": "10.1109/ICASSP.2004.1326564", "title": "A sequential multiple watermarks embedding technique", "normalizedTitle": "A sequential multiple watermarks embedding technique", "abstract": "In this paper, we propose a novel multiple watermarks embedding scheme. We assume M watermarks have already embedded in the image using M sets of secret key. With the availability of these M sets secret key, another N watermarks can be embedded using the proposed technique while the energies of the watermarks are minimized. And the watermarks embedded later will not interfere with the first M watermarks. Experimental results show watermarked images have good visual quality and the watermarks are robust to JPEG compression and noise attacks.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a novel multiple watermarks embedding scheme. We assume M watermarks have already embedded in the image using M sets of secret key. With the availability of these M sets secret key, another N watermarks can be embedded using the proposed technique while the energies of the watermarks are minimized. And the watermarks embedded later will not interfere with the first M watermarks. Experimental results show watermarked images have good visual quality and the watermarks are robust to JPEG compression and noise attacks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a novel multiple watermarks embedding scheme. We assume M watermarks have already embedded in the image using M sets of secret key. 
With the availability of these M sets secret key, another N watermarks can be embedded using the proposed technique while the energies of the watermarks are minimized. And the watermarks embedded later will not interfere with the first M watermarks. Experimental results show watermarked images have good visual quality and the watermarks are robust to JPEG compression and noise attacks.", "fno": "01326564", "keywords": [ "Watermarking", "Data Encapsulation", "Data Compression", "Image Coding", "Multiple Watermark Embedding", "Secret Key", "Energy Minimization", "Visual Quality", "JPEG Compression", "Noise Attack Robustness", "Sequential Watermark Embedding", "Watermarking", "Crosstalk", "Noise Robustness", "Decoding", "Transform Coding", "Image Coding", "World Wide Web" ], "authors": [ { "affiliation": "Dept. of Electr. & Electron. Eng., Hong Kong Univ. of Sci. & Technol., China", "fullName": "P.H.W. Wong", "givenName": "P.H.W.", "surname": "Wong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "A. Chang", "givenName": "A.", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "O.C. 
Au", "givenName": "O.C.", "surname": "Au", "__typename": "ArticleAuthorType" } ], "idPrefix": "icassp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "iii-393-6 vol.3", "year": "2004", "issn": "1520-6149", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01326563", "articleId": "12OmNwDACbd", "__typename": "AdjacentArticleType" }, "next": { "fno": "01326565", "articleId": "12OmNzZmZtf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iihmsp/2006/2745/0/04041763", "title": "Audio Integrity Protection and Falsification Estimation by Embedding Multiple Watermarks", "doi": null, "abstractUrl": "/proceedings-article/iihmsp/2006/04041763/12OmNAg7jZN", "parentPublication": { "id": "proceedings/iihmsp/2006/2745/0", "title": "2006 International Conference on Intelligent Information Hiding and Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/host/2010/7811/0/05513115", "title": "Provably secure obfuscation of diverse watermarks for sequential circuits", "doi": null, "abstractUrl": "/proceedings-article/host/2010/05513115/12OmNqBKTRy", "parentPublication": { "id": "proceedings/host/2010/7811/0", "title": "2010 IEEE International Symposium on Hardware-Oriented Security and Trust (HOST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2002/7402/2/05744039", "title": "Embedding and detecting spread-spectrum watermarks under estimation attacks", "doi": null, "abstractUrl": "/proceedings-article/icassp/2002/05744039/12OmNwGIczR", "parentPublication": { "id": "proceedings/icassp/2002/7402/2", "title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing 
(CASSP'02)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2006/2745/0/27450469", "title": "Audio Integrity Protection and Falsification Estimation by Embedding Multiple Watermarks", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2006/27450469/12OmNwlHSRu", "parentPublication": { "id": "proceedings/iih-msp/2006/2745/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1998/8821/1/882110474", "title": "Embedding visible video watermarks in the compressed domain", "doi": null, "abstractUrl": "/proceedings-article/icip/1998/882110474/12OmNwoPtwh", "parentPublication": { "id": "proceedings/icip/1998/8821/1", "title": "Proceedings of IPCIP'98 International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccnt/2012/9999/0/06396078", "title": "A blind watermarking using MSB insertion to embed multiple watermarks", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2012/06396078/12OmNx3q72y", "parentPublication": { "id": "proceedings/icccnt/2012/9999/0", "title": "2012 Third International Conference on Computing, Communication and Networking Technologies (ICCCNT 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmamh/2007/3065/0/30650015", "title": "Performance Evaluation of Multiple Watermarks System", "doi": null, "abstractUrl": "/proceedings-article/dmamh/2007/30650015/12OmNxETanq", "parentPublication": { "id": "proceedings/dmamh/2007/3065/0", "title": "Digital Media and its Application in Museum &amp; Heritage/Digital Media and its Application in Museum &amp; Heritage, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iih-msp/2007/2994/1/29940303", "title": "Hiding Multiple Watermarks in Transparencies of Visual Cryptography", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2007/29940303/12OmNzVGcNw", "parentPublication": { "id": "iih-msp/2007/2994/1", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2018/9385/0/938500a637", "title": "Halftone Modulation for Embedding UV Watermarks in Color Printed Images", "doi": null, "abstractUrl": "/proceedings-article/sitis/2018/938500a637/19RSwbPJx4s", "parentPublication": { "id": "proceedings/sitis/2018/9385/0", "title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2021/0898/0/089800a169", "title": "DeepMark: Embedding Watermarks into Deep Neural Network Using Pruning", "doi": null, "abstractUrl": "/proceedings-article/ictai/2021/089800a169/1zw63wnGaNq", "parentPublication": { "id": "proceedings/ictai/2021/0898/0", "title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAWH9tO", "title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)", "acronym": "icassp", "groupId": "1000002", "volume": "4", "displayVolume": "4", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNsd6vpS", "doi": "10.1109/ICASSP.2002.5745405", "title": "High-volume data hiding in images: Introducing perceptual criteria into quantization based embedding", "normalizedTitle": "High-volume data hiding in images: Introducing perceptual criteria into quantization based embedding", "abstract": "Information-theoretic analyses for data hiding prescribe embedding the hidden data in the choice of quantizer for the host data. In this paper, we consider a suboptimal implementation of this prescription, with a view to hiding high volumes of data in images with low perceptual degradation. Our two main findings are as follows: (a) In order to limit perceptual distortion while hiding large amounts of data, the hiding scheme must use perceptual criteria in addition to information-theoretic guidelines. (b) By focusing on “benign” JPEG compression attacks, we are able to attain very high volumes of embedded data, comparable to information-theoretic capacity estimates for the more malicious Additive White Gaussian Noise (AWGN) attack channel, using relatively simple embedding techniques.", "abstracts": [ { "abstractType": "Regular", "content": "Information-theoretic analyses for data hiding prescribe embedding the hidden data in the choice of quantizer for the host data. In this paper, we consider a suboptimal implementation of this prescription, with a view to hiding high volumes of data in images with low perceptual degradation. Our two main findings are as follows: (a) In order to limit perceptual distortion while hiding large amounts of data, the hiding scheme must use perceptual criteria in addition to information-theoretic guidelines. 
(b) By focusing on “benign” JPEG compression attacks, we are able to attain very high volumes of embedded data, comparable to information-theoretic capacity estimates for the more malicious Additive White Gaussian Noise (AWGN) attack channel, using relatively simple embedding techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Information-theoretic analyses for data hiding prescribe embedding the hidden data in the choice of quantizer for the host data. In this paper, we consider a suboptimal implementation of this prescription, with a view to hiding high volumes of data in images with low perceptual degradation. Our two main findings are as follows: (a) In order to limit perceptual distortion while hiding large amounts of data, the hiding scheme must use perceptual criteria in addition to information-theoretic guidelines. (b) By focusing on “benign” JPEG compression attacks, we are able to attain very high volumes of embedded data, comparable to information-theoretic capacity estimates for the more malicious Additive White Gaussian Noise (AWGN) attack channel, using relatively simple embedding techniques.", "fno": "05745405", "keywords": [ "Image Coding", "Transform Coding", "Watermarking", "Quantization" ], "authors": [ { "affiliation": "Dept. of Electrical and Computer Engineering, University of California at Santa Barbara, 93106, USA", "fullName": "K. Solanki", "givenName": "K.", "surname": "Solanki", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electrical and Computer Engineering, University of California at Santa Barbara, 93106, USA", "fullName": "N. Jacobsen", "givenName": "N.", "surname": "Jacobsen", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electrical and Computer Engineering, University of California at Santa Barbara, 93106, USA", "fullName": "S. Chandrasekaran", "givenName": "S.", "surname": "Chandrasekaran", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. 
of Electrical and Computer Engineering, University of California at Santa Barbara, 93106, USA", "fullName": "U. Madhow", "givenName": "U.", "surname": "Madhow", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electrical and Computer Engineering, University of California at Santa Barbara, 93106, USA", "fullName": "B. S. Manjunath", "givenName": "B. S.", "surname": "Manjunath", "__typename": "ArticleAuthorType" } ], "idPrefix": "icassp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-05-01T00:00:00", "pubType": "proceedings", "pages": "IV-3485-IV-3488", "year": "2002", "issn": "1520-6149", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05745404", "articleId": "12OmNqHItz0", "__typename": "AdjacentArticleType" }, "next": { "fno": "05745406", "articleId": "12OmNx3q6U6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccae/2009/3569/0/3569a181", "title": "A New Steganographic Method Using Quantization Index Modulation", "doi": null, "abstractUrl": "/proceedings-article/iccae/2009/3569a181/12OmNAQJzVp", "parentPublication": { "id": "proceedings/iccae/2009/3569/0", "title": "2009 International Conference on Computer and Automation Engineering. 
ICCAE 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2008/2570/0/04607584", "title": "An efficient data representation scheme for complete video quality preserving data hiding", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607584/12OmNAYoKoU", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1997/8316/1/00680525", "title": "Media compression via data hiding", "doi": null, "abstractUrl": "/proceedings-article/acssc/1997/00680525/12OmNCcbEji", "parentPublication": { "id": "proceedings/acssc/1997/8316/1", "title": "Conference Record of the Thirty-First Asilomar Conference on Signals, Systems and Computers (Cat. No.97CB36163)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asiajcis/2012/4776/0/06298125", "title": "Reversible Data Hiding for JPEG Based on EMD", "doi": null, "abstractUrl": "/proceedings-article/asiajcis/2012/06298125/12OmNCf1Dl7", "parentPublication": { "id": "proceedings/asiajcis/2012/4776/0", "title": "2012 Seventh Asia Joint Conference on Information Security (ASIA JCIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/milcom/2002/7625/1/01180477", "title": "Image adaptive high volume data hiding based on scalar quantization", "doi": null, "abstractUrl": "/proceedings-article/milcom/2002/01180477/12OmNvA1hpD", "parentPublication": { "id": "proceedings/milcom/2002/7625/2", "title": "Military Communications Conference (MILCOM 2002)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icie/2010/4080/2/05571284", "title": "A Reversible Data Hiding Algorithm Based on Multiple Extended Codebooks", "doi": null, "abstractUrl": 
"/proceedings-article/icie/2010/05571284/12OmNxRWI9J", "parentPublication": { "id": "proceedings/icie/2010/4080/2", "title": "Information Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1997/8316/2/00679056", "title": "Locally-adaptive perceptual quantization without side information for DCT coefficients", "doi": null, "abstractUrl": "/proceedings-article/acssc/1997/00679056/12OmNzBOibX", "parentPublication": { "id": "proceedings/acssc/1997/8316/2", "title": "Conference Record of the Thirty-First Asilomar Conference on Signals, Systems and Computers (Cat. No.97CB36163)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/3/01394704", "title": "A VQ-based image-in-image data hiding scheme", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394704/12OmNzRqdGc", "parentPublication": { "id": "proceedings/icme/2004/8603/3", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890128", "title": "Enhanced perceptual image authentication with tamper localization and self-restoration", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890128/12OmNzYeAXX", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/act/2009/3915/0/05376830", "title": "M-ary QIM Data Hiding for Error Concealment of Digital Image in JPEG Pipeline", "doi": null, "abstractUrl": "/proceedings-article/act/2009/05376830/13bd1gQYgEb", "parentPublication": { "id": "proceedings/act/2009/3915/0", "title": "Advances in Computing, Control, and Telecommunication Technologies, International Conference on", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyKa5Tk", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNyfdON5", "doi": "10.1109/ICME.2008.4607407", "title": "Optimizing the capacity of distortion-freewatermarking on palette images", "normalizedTitle": "Optimizing the capacity of distortion-freewatermarking on palette images", "abstract": "In a palette image file, each color is pointed by at least one palette entry and each pixel is represented with one of the color pointers. Since many palette images use a portion of palette entries only, this paper presents a distortion-free watermarking on palette images by exploiting the unused entries. It allocates x palette entries to one color such that each pixel of the color is able to embed log2 x bits. In order to achieve high embedding capacity, we formulate the relationship among embedding capacity, color occurrence, and unused palette entries. By solving the formula and allocating the palette entries in a sub-optimal manner, the present scheme provides a roughly optimal capacity. Our experiment demonstrates that the present scheme is of high watermarking capacity.", "abstracts": [ { "abstractType": "Regular", "content": "In a palette image file, each color is pointed by at least one palette entry and each pixel is represented with one of the color pointers. Since many palette images use a portion of palette entries only, this paper presents a distortion-free watermarking on palette images by exploiting the unused entries. It allocates x palette entries to one color such that each pixel of the color is able to embed log2 x bits. In order to achieve high embedding capacity, we formulate the relationship among embedding capacity, color occurrence, and unused palette entries. 
By solving the formula and allocating the palette entries in a sub-optimal manner, the present scheme provides a roughly optimal capacity. Our experiment demonstrates that the present scheme is of high watermarking capacity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In a palette image file, each color is pointed by at least one palette entry and each pixel is represented with one of the color pointers. Since many palette images use a portion of palette entries only, this paper presents a distortion-free watermarking on palette images by exploiting the unused entries. It allocates x palette entries to one color such that each pixel of the color is able to embed log2 x bits. In order to achieve high embedding capacity, we formulate the relationship among embedding capacity, color occurrence, and unused palette entries. By solving the formula and allocating the palette entries in a sub-optimal manner, the present scheme provides a roughly optimal capacity. Our experiment demonstrates that the present scheme is of high watermarking capacity.", "fno": "04607407", "keywords": [ "Image Resolution", "Watermarking", "Palette Image File", "Distortion Free Watermarking", "Embedding Capacity", "Color Occurrence", "Unused Palette", "Image Color Analysis", "Pixel", "Watermarking", "Image Coding", "Distortion", "Multimedia Communication", "Bit Rate" ], "authors": [ { "affiliation": "Institute for Infocomm Research (I2R), Singapore", "fullName": "Yongdong Wu", "givenName": null, "surname": "Yongdong Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute for Infocomm Research (I2R), Singapore", "fullName": "Qiming Li", "givenName": null, "surname": "Qiming Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute for Infocomm Research (I2R), Singapore", "fullName": "Feng Bao", "givenName": null, "surname": "Feng Bao", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, 
"showBuyMe": true, "hasPdf": true, "pubDate": "2008-06-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "1945-7871", "isbn": "978-1-4244-2570-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04607406", "articleId": "12OmNzvhvEh", "__typename": "AdjacentArticleType" }, "next": { "fno": "04607408", "articleId": "12OmNqzu6Od", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icip/1997/8183/1/81831830", "title": "Adaptive palette determination for color images based on Kohonen networks", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81831830/12OmNAnMuHl", "parentPublication": { "id": "proceedings/icip/1997/8183/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733b058", "title": "PaletteNet: Image Recolorization with Given Color Palette", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b058/12OmNC3Xhrj", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2008/2570/0/04607740", "title": "Attack by colorization of a grey-level image hiding its color palette", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607740/12OmNqHqSoc", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mue/2009/3658/0/3658a197", "title": "High Capacity Image Data Hiding Scheme for Grouping Palette Index", "doi": null, "abstractUrl": 
"/proceedings-article/mue/2009/3658a197/12OmNweBUQh", "parentPublication": { "id": "proceedings/mue/2009/3658/0", "title": "Multimedia and Ubiquitous Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1991/0003/0/00150940", "title": "Transform coding of color images with limited palette size", "doi": null, "abstractUrl": "/proceedings-article/icassp/1991/00150940/12OmNxisQVQ", "parentPublication": { "id": "proceedings/icassp/1991/0003/0", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2016/2179/0/2179a266", "title": "Texture Compression with Hierarchical Palette", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2016/2179a266/12OmNzYeAKH", "parentPublication": { "id": "proceedings/bigmm/2016/2179/0", "title": "2016 IEEE Second International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1994/1775/2/00389415", "title": "Subband coding of color images with limited palette size", "doi": null, "abstractUrl": "/proceedings-article/icassp/1994/00389415/12OmNzZmZk1", "parentPublication": { "id": "proceedings/icassp/1994/1775/2", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2002/1695/1/169510252", "title": "Luminance Quasi-Preserving Color Quantization for Digital Steganography to Palette-Based Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169510252/12OmNzd7bHf", "parentPublication": { "id": "proceedings/icpr/2002/1695/1", "title": "Proceedings of 16th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/act/2009/3915/0/05376636", "title": "A Common Palette Creation Algorithm for Multiple Images with Transparency Information", "doi": null, "abstractUrl": "/proceedings-article/act/2009/05376636/13bd1gCd7SH", "parentPublication": { "id": "proceedings/act/2009/3915/0", "title": "Advances in Computing, Control, and Telecommunication Technologies, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09969167", "title": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization", "doi": null, "abstractUrl": "/journal/tg/5555/01/09969167/1IMicNIXex2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiqc", "title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)", "acronym": "bigmm", "groupId": "1808144", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45XuDNGl", "doi": "10.1109/BigMM.2018.8499451", "title": "A New Data Embedding Method with a New Data Embedding Domain for JPEG Images", "normalizedTitle": "A New Data Embedding Method with a New Data Embedding Domain for JPEG Images", "abstract": "Data embedding for digital images is a kind of technology which embeds message, i.e., data stream, into image(s). The design of data embedding may vary according to different applications such as watermarking, data hiding, authentication, steganography, etc. However, the basis of data embedding is the embedding domain. In this paper, we discover a new embedding domain, rounding/truncation error (RTE) domain, which is obtained due to the rounding/truncation error during the decompression of JPEG image, for JPEG image data embedding. To demonstrate the viability of the domain, we propose a new data embedding method which could be exploited for data hiding and authentication. Experimental results indicate that the proposed data embedding method possesses certain embedding capacity and achieves excellent performance when considering imperceptibility of the embedded image.", "abstracts": [ { "abstractType": "Regular", "content": "Data embedding for digital images is a kind of technology which embeds message, i.e., data stream, into image(s). The design of data embedding may vary according to different applications such as watermarking, data hiding, authentication, steganography, etc. However, the basis of data embedding is the embedding domain. 
In this paper, we discover a new embedding domain, rounding/truncation error (RTE) domain, which is obtained due to the rounding/truncation error during the decompression of JPEG image, for JPEG image data embedding. To demonstrate the viability of the domain, we propose a new data embedding method which could be exploited for data hiding and authentication. Experimental results indicate that the proposed data embedding method possesses certain embedding capacity and achieves excellent performance when considering imperceptibility of the embedded image.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Data embedding for digital images is a kind of technology which embeds message, i.e., data stream, into image(s). The design of data embedding may vary according to different applications such as watermarking, data hiding, authentication, steganography, etc. However, the basis of data embedding is the embedding domain. In this paper, we discover a new embedding domain, rounding/truncation error (RTE) domain, which is obtained due to the rounding/truncation error during the decompression of JPEG image, for JPEG image data embedding. To demonstrate the viability of the domain, we propose a new data embedding method which could be exploited for data hiding and authentication. 
Experimental results indicate that the proposed data embedding method possesses certain embedding capacity and achieves excellent performance when considering imperceptibility of the embedded image.", "fno": "08499451", "keywords": [ "Data Compression", "Image Coding", "Data Hiding", "Embedded Image Imperceptibility", "JPEG Image Data Embedding Method", "JPEG Image Decompression", "RTE Domain", "Watermarking", "Steganography", "Rounding Truncation Error Domain", "Embedded Image Capacity", "Data Streaming", "Digital Imaging", "Transform Coding", "Discrete Cosine Transforms", "Distortion", "Authentication", "Quantization Signal", "Decoding", "Data Mining", "Data Embedding", "Embedding Domain", "Data Hiding", "JPEG Image Embedding", "Rounding Truncation" ], "authors": [ { "affiliation": "Institute of Information Engineering, CAS, State Key Laboratory of Information Security, Beijing, China", "fullName": "Yuanfang Guo", "givenName": "Yuanfang", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Information Engineering, CAS, State Key Laboratory of Information Security, Beijing, China", "fullName": "Xiaochun Cao", "givenName": "Xiaochun", "surname": "Cao", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Information Engineering, CAS, State Key Laboratory of Information Security, Beijing, China", "fullName": "Rui Wang", "givenName": "Rui", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Fudan University, School of Computer Science, Shanghai, China", "fullName": "Cheng Jin", "givenName": "Cheng", "surname": "Jin", "__typename": "ArticleAuthorType" } ], "idPrefix": "bigmm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-09-01T00:00:00", "pubType": "proceedings", "pages": "1-5", "year": "2018", "issn": null, "isbn": "978-1-5386-5321-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "08499449", "articleId": "17D45VN31gp", "__typename": "AdjacentArticleType" }, "next": { "fno": "08499091", "articleId": "17D45XuDNGk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/asiajcis/2012/4776/0/06298125", "title": "Reversible Data Hiding for JPEG Based on EMD", "doi": null, "abstractUrl": "/proceedings-article/asiajcis/2012/06298125/12OmNCf1Dl7", "parentPublication": { "id": "proceedings/asiajcis/2012/4776/0", "title": "2012 Seventh Asia Joint Conference on Information Security (ASIA JCIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csnt/2012/4692/0/4692a133", "title": "Transform Domain Video Watermarking: Design, Implementation and Performance Analysis", "doi": null, "abstractUrl": "/proceedings-article/csnt/2012/4692a133/12OmNs0C9CF", "parentPublication": { "id": "proceedings/csnt/2012/4692/0", "title": "Communication Systems and Network Technologies, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvisp/2017/0612/0/0612a022", "title": "Fast Compressed Domain JPEG Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icvisp/2017/0612a022/12OmNxGAKX5", "parentPublication": { "id": "proceedings/icvisp/2017/0612/0", "title": "2017 International Conference on Vision, Image and Signal Processing (ICVISP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851c764", "title": "D3: Deep Dual-Domain Based Fast Restoration of JPEG-Compressed Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851c764/12OmNzDNtvx", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b402", "title": "Multi-domain Embedding Strategies for Video Steganography by Combining Partition Modes and Motion Vectors", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b402/1cdOMG7gLvi", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2018/6956/0/695600a126", "title": "Effective Reversible Information Hiding Based on Block Priority of JPEG Images", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2018/695600a126/1dUo4qkjttu", "parentPublication": { "id": "proceedings/icnisc/2018/6956/0", "title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300d483", "title": "Deep Residual Learning in the JPEG Transform Domain", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300d483/1hVl834pCQU", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2022/02/09153896", "title": "High Capacity Lossless Data Hiding in JPEG Bitstream Based on General VLC Mapping", "doi": null, "abstractUrl": "/journal/tq/2022/02/09153896/1lUBax3eeFW", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428243", "title": "On Generating JPEG Adversarial Images", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428243/1uim0exNIHu", "parentPublication": { "id": 
"proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2021/4254/0/425400a324", "title": "A Spread Spectrum Based Audio Watermarking Method with Embedding Positions Adaption Using Predominant Local Pulse Extraction", "doi": null, "abstractUrl": "/proceedings-article/iccst/2021/425400a324/1ziPmyQ8GYg", "parentPublication": { "id": "proceedings/iccst/2021/4254/0", "title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1B12DGrwoyQ", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1B13SlHr1UQ", "doi": "10.1109/WACV51458.2022.00261", "title": "Parsing Line Chart Images Using Linear Programming", "normalizedTitle": "Parsing Line Chart Images Using Linear Programming", "abstract": "This paper proposes a method for automatically recovering data from chart images. In particular we focus on the task of estimating line charts, as the most common chart type, in a fully automatic way that handles line occlusions, as well as lines of different styles, e.g. dashed or dotted. For this, we first train a single semantic segmentation network to predict probability maps for each different line styles. We then construct a graph based on this output and formulate the line tracing task as a minimum-cost-flow problem, optimizing a cost function using linear programming. From the traced lines, the axes, and text labels, we recover the numerical values used to generate the chart. In experiments on six datasets, containing both synthesized and crawled images, we show significant improvements over prior work.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a method for automatically recovering data from chart images. In particular we focus on the task of estimating line charts, as the most common chart type, in a fully automatic way that handles line occlusions, as well as lines of different styles, e.g. dashed or dotted. For this, we first train a single semantic segmentation network to predict probability maps for each different line styles. We then construct a graph based on this output and formulate the line tracing task as a minimum-cost-flow problem, optimizing a cost function using linear programming. 
From the traced lines, the axes, and text labels, we recover the numerical values used to generate the chart. In experiments on six datasets, containing both synthesized and crawled images, we show significant improvements over prior work.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a method for automatically recovering data from chart images. In particular we focus on the task of estimating line charts, as the most common chart type, in a fully automatic way that handles line occlusions, as well as lines of different styles, e.g. dashed or dotted. For this, we first train a single semantic segmentation network to predict probability maps for each different line styles. We then construct a graph based on this output and formulate the line tracing task as a minimum-cost-flow problem, optimizing a cost function using linear programming. From the traced lines, the axes, and text labels, we recover the numerical values used to generate the chart. In experiments on six datasets, containing both synthesized and crawled images, we show significant improvements over prior work.", "fno": "091500c553", "keywords": [ "Charts", "Data Visualisation", "Graph Theory", "Image Representation", "Image Segmentation", "Linear Programming", "Natural Language Processing", "Probability", "Linear Programming", "Line Charts", "Common Chart Type", "Line Occlusions", "Single Semantic Segmentation Network", "Different Line Styles", "Line Tracing Task", "Minimum Cost Flow Problem", "Traced Lines", "Parsing Line Chart Images", "Image Segmentation", "Computer Vision", "Semantics", "Fitting", "Linear Programming", "Cost Function", "Data Mining", "Document Analysis Vision Systems And Applications" ], "authors": [ { "affiliation": "Rakuten Group, Inc.,Rakuten Institute of Technology", "fullName": "Hajime Kato", "givenName": "Hajime", "surname": "Kato", "__typename": "ArticleAuthorType" }, { "affiliation": "Rakuten Group, Inc.,Rakuten Institute of 
Technology", "fullName": "Mitsuru Nakazawa", "givenName": "Mitsuru", "surname": "Nakazawa", "__typename": "ArticleAuthorType" }, { "affiliation": "National Tsing Hua University", "fullName": "Hsuan-Kung Yang", "givenName": "Hsuan-Kung", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Mark Chen", "givenName": "Mark", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Rakuten Group, Inc.,Rakuten Institute of Technology", "fullName": "Björn Stenger", "givenName": "Björn", "surname": "Stenger", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-01-01T00:00:00", "pubType": "proceedings", "pages": "2553-2562", "year": "2022", "issn": null, "isbn": "978-1-6654-0915-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1B13Siix4gU", "name": "pwacv202209150-09706968s1-mm_091500c553.zip", "size": "5.61 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pwacv202209150-09706968s1-mm_091500c553.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "091500c543", "articleId": "1B12YVEeHyE", "__typename": "AdjacentArticleType" }, "next": { "fno": "091500c563", "articleId": "1B13RrJU12g", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdar/2017/3586/1/3586a865", "title": "Robust Document Image Dewarping Method Using Text-Lines and Line Segments", "doi": null, "abstractUrl": "/proceedings-article/icdar/2017/3586a865/12OmNAkWvHG", "parentPublication": { "id": "proceedings/icdar/2017/3586/1", "title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2011/2135/0/06121002", "title": 
"Text Line Based Correction of Distorted Document Images", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2011/06121002/12OmNBKEyyP", "parentPublication": { "id": "proceedings/trustcom/2011/2135/0", "title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/allerton/2008/2925/0/04797568", "title": "Linear-programming receivers", "doi": null, "abstractUrl": "/proceedings-article/allerton/2008/04797568/12OmNBqv2o7", "parentPublication": { "id": "proceedings/allerton/2008/2925/0", "title": "2008 46th Annual Allerton Conference on Communication, Control, and Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2014/4335/0/06981016", "title": "A Path Planning for Line Segmentation of Handwritten Documents", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2014/06981016/12OmNvStcAe", "parentPublication": { "id": "proceedings/icfhr/2014/4335/0", "title": "2014 14th International Conference on Frontiers in Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a584", "title": "Line Association and Vanishing Point Estimation with Binary Quadratic Programming", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a584/12OmNvjQ8Lq", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2011/4520/0/4520a324", "title": "A New Text-Line Alignment Approach Based on Piece-Wise Painting Algorithm for Handwritten Documents", "doi": null, "abstractUrl": "/proceedings-article/icdar/2011/4520a324/12OmNvpw7d3", "parentPublication": { "id": "proceedings/icdar/2011/4520/0", 
"title": "2011 International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iitsi/2010/4020/0/4020a740", "title": "A New Method of Sea-Sky-Line Detection", "doi": null, "abstractUrl": "/proceedings-article/iitsi/2010/4020a740/12OmNwK7ocj", "parentPublication": { "id": "proceedings/iitsi/2010/4020/0", "title": "Intelligent Information Technology and Security Informatics, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1971/01/01671675", "title": "Optimal Curve Fitting With Piecewise Linear Functions", "doi": null, "abstractUrl": "/journal/tc/1971/01/01671675/13rRUwbaqKl", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440849", "title": "At a Glance: Pixel Approximate Entropy as a Measure of Line Chart Complexity", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440849/17D45XH89qk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956617", "title": "An effective method for text line segmentation in historical document images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956617/1IHp1kfJ98A", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxQOjzD", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1997", "__typename": "ProceedingType" }, "article": { "id": "12OmNs5rkZ3", "doi": "10.1109/VISUAL.1997.663890", "title": "Collaborative visualization", "normalizedTitle": "Collaborative visualization", "abstract": "Current visualization systems are designed around a single user model, making it awkward for large research teams to collectively analyse large data sets. The paper shows how the popular data flow approach to visualization can be extended to allow multiple users to collaborate-each running their own visualization pipeline but with the opportunity to connect in data generated by a colleague, Thus collaborative visualizations are 'programmed' in exactly the same 'plug-and-play' style as is now customary for single-user mode. The paper describes a system architecture that can act as a basis for the collaborative extension of any data flow visualization system, and the ideas are demonstrated through a particular implementation in terms of IRIS Explorer.", "abstracts": [ { "abstractType": "Regular", "content": "Current visualization systems are designed around a single user model, making it awkward for large research teams to collectively analyse large data sets. The paper shows how the popular data flow approach to visualization can be extended to allow multiple users to collaborate-each running their own visualization pipeline but with the opportunity to connect in data generated by a colleague, Thus collaborative visualizations are 'programmed' in exactly the same 'plug-and-play' style as is now customary for single-user mode. 
The paper describes a system architecture that can act as a basis for the collaborative extension of any data flow visualization system, and the ideas are demonstrated through a particular implementation in terms of IRIS Explorer.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Current visualization systems are designed around a single user model, making it awkward for large research teams to collectively analyse large data sets. The paper shows how the popular data flow approach to visualization can be extended to allow multiple users to collaborate-each running their own visualization pipeline but with the opportunity to connect in data generated by a colleague, Thus collaborative visualizations are 'programmed' in exactly the same 'plug-and-play' style as is now customary for single-user mode. The paper describes a system architecture that can act as a basis for the collaborative extension of any data flow visualization system, and the ideas are demonstrated through a particular implementation in terms of IRIS Explorer.", "fno": "82620253", "keywords": [ "Data Visualisation Collaborative Visualization User Model Collective Large Data Set Analysis Visualization Pipeline System Architecture Data Flow Visualization System IRIS Explorer" ], "authors": [ { "affiliation": "Sch. of Comput. Studies, Leeds Univ., UK", "fullName": "J. Wood", "givenName": "J.", "surname": "Wood", "__typename": "ArticleAuthorType" }, { "affiliation": "Sch. of Comput. Studies, Leeds Univ., UK", "fullName": "H. Wright", "givenName": "H.", "surname": "Wright", "__typename": "ArticleAuthorType" }, { "affiliation": "Sch. of Comput. Studies, Leeds Univ., UK", "fullName": "K. 
Brodie", "givenName": "K.", "surname": "Brodie", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1997-10-01T00:00:00", "pubType": "proceedings", "pages": "253", "year": "1997", "issn": "1070-2385", "isbn": "0-8186-8262-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "82620245", "articleId": "12OmNrFBPVW", "__typename": "AdjacentArticleType" }, "next": { "fno": "82620261", "articleId": "12OmNBqMDi3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxcMSdu", "title": "2015 IEEE 7th International Conference on Cloud Computing Technology and Science (CloudCom)", "acronym": "cloudcom", "groupId": "1800284", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNy2rRSo", "doi": "10.1109/CloudCom.2015.65", "title": "The Ignite Distributed Collaborative Scientific Visualization System", "normalizedTitle": "The Ignite Distributed Collaborative Scientific Visualization System", "abstract": "We describe the Ignite Distributed Collaborative Scientific Visualization System (IDCVS), a system which permits real-time interaction and visual collaboration around large data sets, with an initial emphasis on scientific data. The IDCVS offers such a collaborative environment, with real-time interaction on any device between users separated across the wide area. It provides seamless interaction and immediate updates even under heavy load and when users are widely separated: the design goal was to fetch a data set consisting of 30,000 points from a server and render it within 150ms, for a user anywhere in the world, and reflect changes made by a user in one location to all other users within a bound provided by network latency. The system was demonstrated successfully on a significant worldwide air pollution data set, with values on 10, 25, 50, and 100km worldwide grids, monthly over an 18-year period. It was demonstrated on a wide variety of clients, including laptop, tablet, and smartphone.", "abstracts": [ { "abstractType": "Regular", "content": "We describe the Ignite Distributed Collaborative Scientific Visualization System (IDCVS), a system which permits real-time interaction and visual collaboration around large data sets, with an initial emphasis on scientific data. The IDCVS offers such a collaborative environment, with real-time interaction on any device between users separated across the wide area. 
It provides seamless interaction and immediate updates even under heavy load and when users are widely separated: the design goal was to fetch a data set consisting of 30,000 points from a server and render it within 150ms, for a user anywhere in the world, and reflect changes made by a user in one location to all other users within a bound provided by network latency. The system was demonstrated successfully on a significant worldwide air pollution data set, with values on 10, 25, 50, and 100km worldwide grids, monthly over an 18-year period. It was demonstrated on a wide variety of clients, including laptop, tablet, and smartphone.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe the Ignite Distributed Collaborative Scientific Visualization System (IDCVS), a system which permits real-time interaction and visual collaboration around large data sets, with an initial emphasis on scientific data. The IDCVS offers such a collaborative environment, with real-time interaction on any device between users separated across the wide area. It provides seamless interaction and immediate updates even under heavy load and when users are widely separated: the design goal was to fetch a data set consisting of 30,000 points from a server and render it within 150ms, for a user anywhere in the world, and reflect changes made by a user in one location to all other users within a bound provided by network latency. The system was demonstrated successfully on a significant worldwide air pollution data set, with values on 10, 25, 50, and 100km worldwide grids, monthly over an 18-year period. 
It was demonstrated on a wide variety of clients, including laptop, tablet, and smartphone.", "fno": "9560a186", "keywords": [ "Servers", "Collaboration", "Data Visualization", "Computer Architecture", "Games", "Browsers", "Continents", "Distributed Cloud", "Visualization" ], "authors": [ { "affiliation": null, "fullName": "Sushil Bhojwani", "givenName": "Sushil", "surname": "Bhojwani", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Matthew Hemmings", "givenName": "Matthew", "surname": "Hemmings", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Daniel Ingalls", "givenName": "Daniel", "surname": "Ingalls", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jens Lincke", "givenName": "Jens", "surname": "Lincke", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Robert Krahn", "givenName": "Robert", "surname": "Krahn", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "David Lary", "givenName": "David", "surname": "Lary", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Patrick McGeer", "givenName": "Patrick", "surname": "McGeer", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Glenn Ricart", "givenName": "Glenn", "surname": "Ricart", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Marko Roeder", "givenName": "Marko", "surname": "Roeder", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yvonne Coady", "givenName": "Yvonne", "surname": "Coady", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ulrike Stege", "givenName": "Ulrike", "surname": "Stege", "__typename": "ArticleAuthorType" } ], "idPrefix": "cloudcom", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-11-01T00:00:00", "pubType": "proceedings", "pages": "186-191", "year": "2015", "issn": null, "isbn": 
"978-1-4673-9560-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9560a180", "articleId": "12OmNy7Qfu1", "__typename": "AdjacentArticleType" }, "next": { "fno": "9560a192", "articleId": "12OmNxAlA6w", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cts/2016/2300/0/07870997", "title": "High-Resolution Interactive and Collaborative Data Visualization Framework for Large-Scale Data Analysis", "doi": null, "abstractUrl": "/proceedings-article/cts/2016/07870997/12OmNBOCWfQ", "parentPublication": { "id": "proceedings/cts/2016/2300/0", "title": "2016 International Conference on Collaboration Technologies and Systems (CTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2017/0621/0/0621a915", "title": "Collaborative Group Label Work System Based on Web Technology", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2017/0621a915/12OmNCesr8Y", "parentPublication": { "id": "proceedings/iiai-aai/2017/0621/0", "title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2015/7673/0/7673a232", "title": "Research of Collaborative Interactive for Medical Imaging Based on Roles Change", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2015/7673a232/12OmNrYlmEX", "parentPublication": { "id": "proceedings/icvrv/2015/7673/0", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gas/2015/7046/0/7046a022", "title": "Space Connection: A New 3D Tele-immersion Platform for Web-Based Gesture-Collaborative Games and Services", "doi": null, "abstractUrl": 
"/proceedings-article/gas/2015/7046a022/12OmNwtn3sw", "parentPublication": { "id": "proceedings/gas/2015/7046/0", "title": "2015 IEEE/ACM 4th International Workshop on Games and Software Engineering (GAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sccc/2014/0421/0/0421a107", "title": "Kelluntekun: Rich Internet Application for Collaborative Snippet Management, Using Oows2.0 and Vaadin", "doi": null, "abstractUrl": "/proceedings-article/sccc/2014/0421a107/12OmNy3RRAa", "parentPublication": { "id": "proceedings/sccc/2014/0421/0", "title": "2014 33rd International Conference of the Chilean Computer Science Society (SCCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2014/6854/0/6854a081", "title": "Multiplayer Collaborative Training System Based on Mobile AR Innovative Interaction Technology", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a081/12OmNynJMDr", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2016/4149/0/4149a220", "title": "Distributed Virtual Reality for Collaborative Backlot Visualization", "doi": null, "abstractUrl": "/proceedings-article/svr/2016/4149a220/12OmNzIl3DG", "parentPublication": { "id": "proceedings/svr/2016/4149/0", "title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2014/6854/0/6854a110", "title": "Research of Collaborative Interactive Visualization for Medical Imaging", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a110/12OmNzzP5Ql", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and 
Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/04/08523628", "title": "Scientific Visualization as a Microservice", "doi": null, "abstractUrl": "/journal/tg/2020/04/08523628/17D45WaTkiH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222110", "title": "VisConnect: Distributed Event Synchronization for Collaborative Visualization", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222110/1nTrMx8JcGI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1JeEzraOedO", "title": "2022 Working Conference on Software Visualization (VISSOFT)", "acronym": "vissoft", "groupId": "9978165", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JeEzW6wcZG", "doi": "10.1109/VISSOFT55257.2022.00016", "title": "Collaborative Software Visualization for Program Comprehension", "normalizedTitle": "Collaborative Software Visualization for Program Comprehension", "abstract": "In the context of program comprehension, learning and working in teams, e.g., via pair programming, shared documentation, and discussions, can facilitate the comprehension tasks. So far, team collaboration is a relatively unexplored aspect in software visualizations, in particular approaches which are designed and explored to enable collaborative program comprehension.In this paper, we introduce our collaboratively usable software visualization environment for program comprehension. Related approaches are often limited to single-user modes, therefore neglect the advantages of multi-user collaboration, or allow only the use of a single type of device. Our approach addresses this topic and allows users to collaboratively explore software visualizations in a device-heterogeneous environment. User events, e.g., sharable pop-up information windows, are synchronized between each session participant, regardless of the employed device.To the best of our knowledge, this is one of the first approaches that combines on-screen, virtual reality, and augmented reality modes in a single web-based SV tool for program comprehension. We conducted a user study to collect initial results regarding the perceived usefulness and enjoyment of co-explored software cities In that study, 20 participants collaboratively solved program comprehension tasks while using each mode consecutively. 
The results indicate that the majority of participants find our approach useful and enjoyable, with AR being the least favored mode. We provide each participant&#x2019;s video recording, the study&#x2019;s raw results, Jupyter Notebooks, and all steps to reproduce our evaluation as supplementary package. Furthermore, a live demo of our tool is available online.<sup>1</sup> We invite other researchers to extend our open-source software and jointly research this novel approach.Video URL: https://youtu.be/MYAkRMWLVD8", "abstracts": [ { "abstractType": "Regular", "content": "In the context of program comprehension, learning and working in teams, e.g., via pair programming, shared documentation, and discussions, can facilitate the comprehension tasks. So far, team collaboration is a relatively unexplored aspect in software visualizations, in particular approaches which are designed and explored to enable collaborative program comprehension.In this paper, we introduce our collaboratively usable software visualization environment for program comprehension. Related approaches are often limited to single-user modes, therefore neglect the advantages of multi-user collaboration, or allow only the use of a single type of device. Our approach addresses this topic and allows users to collaboratively explore software visualizations in a device-heterogeneous environment. User events, e.g., sharable pop-up information windows, are synchronized between each session participant, regardless of the employed device.To the best of our knowledge, this is one of the first approaches that combines on-screen, virtual reality, and augmented reality modes in a single web-based SV tool for program comprehension. We conducted a user study to collect initial results regarding the perceived usefulness and enjoyment of co-explored software cities In that study, 20 participants collaboratively solved program comprehension tasks while using each mode consecutively. 
The results indicate that the majority of participants find our approach useful and enjoyable, with AR being the least favored mode. We provide each participant&#x2019;s video recording, the study&#x2019;s raw results, Jupyter Notebooks, and all steps to reproduce our evaluation as supplementary package. Furthermore, a live demo of our tool is available online.<sup>1</sup> We invite other researchers to extend our open-source software and jointly research this novel approach.Video URL: https://youtu.be/MYAkRMWLVD8", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the context of program comprehension, learning and working in teams, e.g., via pair programming, shared documentation, and discussions, can facilitate the comprehension tasks. So far, team collaboration is a relatively unexplored aspect in software visualizations, in particular approaches which are designed and explored to enable collaborative program comprehension.In this paper, we introduce our collaboratively usable software visualization environment for program comprehension. Related approaches are often limited to single-user modes, therefore neglect the advantages of multi-user collaboration, or allow only the use of a single type of device. Our approach addresses this topic and allows users to collaboratively explore software visualizations in a device-heterogeneous environment. User events, e.g., sharable pop-up information windows, are synchronized between each session participant, regardless of the employed device.To the best of our knowledge, this is one of the first approaches that combines on-screen, virtual reality, and augmented reality modes in a single web-based SV tool for program comprehension. We conducted a user study to collect initial results regarding the perceived usefulness and enjoyment of co-explored software cities In that study, 20 participants collaboratively solved program comprehension tasks while using each mode consecutively. 
The results indicate that the majority of participants find our approach useful and enjoyable, with AR being the least favored mode. We provide each participant’s video recording, the study’s raw results, Jupyter Notebooks, and all steps to reproduce our evaluation as supplementary package. Furthermore, a live demo of our tool is available online.1 We invite other researchers to extend our open-source software and jointly research this novel approach.Video URL: https://youtu.be/MYAkRMWLVD8", "fno": "809200a075", "keywords": [ "Augmented Reality", "Data Visualisation", "Groupware", "Internet", "Program Visualisation", "Public Domain Software", "Software Engineering", "Augmented Reality Modes", "Co Explored Software Cities", "Collaborative Program Comprehension", "Collaborative Software Visualization", "Device Heterogeneous Environment", "Multiuser Collaboration", "Open Source Software", "Pair Programming", "Sharable Pop Up Information Windows", "Shared Documentation", "Single Web Based SV Tool", "Single User Modes", "Team Collaboration", "Video Recording", "Virtual Reality", "Uniform Resource Locators", "Knowledge Engineering", "Visualization", "Urban Areas", "Collaboration", "Documentation", "Synchronization", "Program Comprehension", "Software Visualization", "Collaboration", "Software As A Service", "Extended Reality" ], "authors": [ { "affiliation": "Kiel University,Software Engineering Group,Kiel,Germany", "fullName": "Alexander Krause-Glau", "givenName": "Alexander", "surname": "Krause-Glau", "__typename": "ArticleAuthorType" }, { "affiliation": "Kiel University,Software Engineering Group,Kiel,Germany", "fullName": "Marcel Bader", "givenName": "Marcel", "surname": "Bader", "__typename": "ArticleAuthorType" }, { "affiliation": "Kiel University,Software Engineering Group,Kiel,Germany", "fullName": "Wilhelm Hasselbring", "givenName": "Wilhelm", "surname": "Hasselbring", "__typename": "ArticleAuthorType" } ], "idPrefix": "vissoft", "isOpenAccess": false, 
"showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "75-86", "year": "2022", "issn": null, "isbn": "978-1-6654-8092-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "809200a063", "articleId": "1JeEGhDEsFi", "__typename": "AdjacentArticleType" }, "next": { "fno": "809200a087", "articleId": "1JeEDJKZm6s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wpc/1993/4042/0/00263911", "title": "Criteria for program comprehension derived from software complexity metrics", "doi": null, "abstractUrl": "/proceedings-article/wpc/1993/00263911/12OmNBB0bVX", "parentPublication": { "id": "proceedings/wpc/1993/4042/0", "title": "1993 IEEE Second Workshop on Program Comprehension", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsee/2012/4647/1/4647a601", "title": "Overview of Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/iccsee/2012/4647a601/12OmNBziBc8", "parentPublication": { "id": "proceedings/iccsee/2012/4647/2", "title": "Computer Science and Electronics Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2009/3998/0/05090033", "title": "Trace visualization for program comprehension: A controlled experiment", "doi": null, "abstractUrl": "/proceedings-article/icpc/2009/05090033/12OmNCdBDHF", "parentPublication": { "id": "proceedings/icpc/2009/3998/0", "title": "2009 IEEE 17th International Conference on Program Comprehension (ICPC 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esem/2011/4604/0/4604a127", "title": "Exploring Software Measures to Assess Program Comprehension", "doi": null, "abstractUrl": 
"/proceedings-article/esem/2011/4604a127/12OmNxXCGIV", "parentPublication": { "id": "proceedings/esem/2011/4604/0", "title": "2011 International Symposium on Empirical Software Engineering and Measurement", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2007/2860/0/28600231", "title": "Program Comprehension through Software Habitability", "doi": null, "abstractUrl": "/proceedings-article/icpc/2007/28600231/12OmNy50g4S", "parentPublication": { "id": "proceedings/icpc/2007/2860/0", "title": "15th IEEE International Conference on Program Comprehension (ICPC '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsm/2008/2613/0/04658097", "title": "COSS: Comprehension by ontologising software system", "doi": null, "abstractUrl": "/proceedings-article/icsm/2008/04658097/12OmNzh5z1q", "parentPublication": { "id": "proceedings/icsm/2008/2613/0", "title": "2008 IEEE International Conference on Software Maintenance", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2022/9598/0/959800a041", "title": "COSPEX: A Program Comprehension Tool for Novice Programmers", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2022/959800a041/1EaP68o0IjS", "parentPublication": { "id": "proceedings/icse-companion/2022/9598/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2022/9598/0/959800a130", "title": "M3triCity: Visualizing Evolving Software &#x0026; Data Cities", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2022/959800a130/1EaP6LqZuI8", "parentPublication": { "id": "proceedings/icse-companion/2022/9598/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Companion 
Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2022/9298/0/929800a597", "title": "Using Discord Conversations as Program Comprehension Aid", "doi": null, "abstractUrl": "/proceedings-article/icpc/2022/929800a597/1EpKOoZZ8uQ", "parentPublication": { "id": "proceedings/icpc/2022/9298/0", "title": "2022 IEEE/ACM 30th International Conference on Program Comprehension (ICPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vissoft/2021/3144/0/314400a055", "title": "Trace Visualization within the Software City Metaphor: A Controlled Experiment on Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/vissoft/2021/314400a055/1yrHurrXXLW", "parentPublication": { "id": "proceedings/vissoft/2021/3144/0", "title": "2021 Working Conference on Software Visualization (VISSOFT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwpGgL4", "title": "Secure System Integration and Reliability Improvement", "acronym": "ssiri", "groupId": "1002113", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNBtl1E6", "doi": "10.1109/SSIRI.2009.58", "title": "Generating Test Cases for Timed Systems from Controlled Natural Language Specifications", "normalizedTitle": "Generating Test Cases for Timed Systems from Controlled Natural Language Specifications", "abstract": "Dynamic testing is still the most used quality assurance technique in the automotive industry. There is a need to automate the testing process as much as possible. In this work we focus on the automatic generation of test cases from requirement specifications. To embed the approach as close as possible into existing workflows we start with natural language like specifications, as requirements are still mostly written in natural language. To support this, we specify a controlled natural language for the automotive domain. After acquiring the requirements they are translated into a formal model. The model enables an efficient reachability analysis and allows to describe rich temporal behavior. We then use partial order planning to create positive and negative tests. The resulting test cases are able to handle non-deterministic timing behavior. Furthermore the test cases can be presented in a comprehensible way, so that the reader can validate them.", "abstracts": [ { "abstractType": "Regular", "content": "Dynamic testing is still the most used quality assurance technique in the automotive industry. There is a need to automate the testing process as much as possible. In this work we focus on the automatic generation of test cases from requirement specifications. To embed the approach as close as possible into existing workflows we start with natural language like specifications, as requirements are still mostly written in natural language. 
To support this, we specify a controlled natural language for the automotive domain. After acquiring the requirements they are translated into a formal model. The model enables an efficient reachability analysis and allows to describe rich temporal behavior. We then use partial order planning to create positive and negative tests. The resulting test cases are able to handle non-deterministic timing behavior. Furthermore the test cases can be presented in a comprehensible way, so that the reader can validate them.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Dynamic testing is still the most used quality assurance technique in the automotive industry. There is a need to automate the testing process as much as possible. In this work we focus on the automatic generation of test cases from requirement specifications. To embed the approach as close as possible into existing workflows we start with natural language like specifications, as requirements are still mostly written in natural language. To support this, we specify a controlled natural language for the automotive domain. After acquiring the requirements they are translated into a formal model. The model enables an efficient reachability analysis and allows to describe rich temporal behavior. We then use partial order planning to create positive and negative tests. The resulting test cases are able to handle non-deterministic timing behavior. 
Furthermore the test cases can be presented in a comprehensible way, so that the reader can validate them.", "fno": "3758a348", "keywords": [ "Test Case Generation", "Timed Systems", "Controlled Natural Language", "Planning" ], "authors": [ { "affiliation": null, "fullName": "Matthias Schnelte", "givenName": "Matthias", "surname": "Schnelte", "__typename": "ArticleAuthorType" } ], "idPrefix": "ssiri", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-07-01T00:00:00", "pubType": "proceedings", "pages": "348-353", "year": "2009", "issn": null, "isbn": "978-0-7695-3758-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3758a342", "articleId": "12OmNBIWXCt", "__typename": "AdjacentArticleType" }, "next": { "fno": "3758a359", "articleId": "12OmNvFpEvr", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/apsec/2004/2245/0/22450284", "title": "Generating Test Cases from UML Activity Diagram based on Gray-Box Method", "doi": null, "abstractUrl": "/proceedings-article/apsec/2004/22450284/12OmNBEpnC8", "parentPublication": { "id": "proceedings/apsec/2004/2245/0", "title": "11th Asia-Pacific Software Engineering Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rew/2017/3488/0/3488a331", "title": "Integrating Graphical and Natural Language Specifications to Support Analysis and Testing", "doi": null, "abstractUrl": "/proceedings-article/rew/2017/3488a331/12OmNrJAdRc", "parentPublication": { "id": "proceedings/rew/2017/3488/0", "title": "2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ret/2014/6334/0/06908677", "title": "C&amp;L: Generating model based test cases from natural 
language requirements descriptions", "doi": null, "abstractUrl": "/proceedings-article/ret/2014/06908677/12OmNs0TKIZ", "parentPublication": { "id": "proceedings/ret/2014/6334/0", "title": "2014 IEEE 1st International Workshop on Requirements Engineering and Testing (RET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/1997/7961/0/79610309", "title": "From formal specifications to natural language: a case study", "doi": null, "abstractUrl": "/proceedings-article/ase/1997/79610309/12OmNvSbBsX", "parentPublication": { "id": "proceedings/ase/1997/7961/0", "title": "Proceedings 12th IEEE International Conference Automated Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iceccs/2000/0583/0/05830164", "title": "A Test Class Framework for Generating Test Cases from Z Specifications", "doi": null, "abstractUrl": "/proceedings-article/iceccs/2000/05830164/12OmNwDAC59", "parentPublication": { "id": "proceedings/iceccs/2000/0583/0", "title": "Engineering of Complex Computer Systems, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2018/5012/0/501201a023", "title": "Automated Generation of Constraints from Use Case Specifications to Support System Testing", "doi": null, "abstractUrl": "/proceedings-article/icst/2018/501201a023/12OmNzJbR1q", "parentPublication": { "id": "proceedings/icst/2018/5012/0", "title": "2018 IEEE 11th International Conference on Software Testing, Verification and Validation (ICST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tools/2000/0918/0/09180108", "title": "Prioritising Use Cases and Scenarios", "doi": null, "abstractUrl": "/proceedings-article/tools/2000/09180108/12OmNzw8iYM", "parentPublication": { "id": "proceedings/tools/2000/0918/0", "title": "37th International 
Conference on Technology of Object-Oriented Languages and Systems (TOOLS-37'00)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2019/6783/0/08665517", "title": "Controlled Natural Language Framework for Generating Assertions from Hardware Specifications", "doi": null, "abstractUrl": "/proceedings-article/icsc/2019/08665517/18qcdZ7PxBe", "parentPublication": { "id": "proceedings/icsc/2019/6783/0", "title": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2019/0869/0/086900a188", "title": "Automatically Generating Precise Oracles from Structured Natural Language Specifications", "doi": null, "abstractUrl": "/proceedings-article/icse/2019/086900a188/1cMFuexw06Y", "parentPublication": { "id": "proceedings/icse/2019/0869/0", "title": "2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2022/02/09103626", "title": "Automatic Generation of Acceptance Test Cases From Use Case Specifications: An NLP-Based Approach", "doi": null, "abstractUrl": "/journal/ts/2022/02/09103626/1kersE97zri", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyugz5g", "title": "2018 IEEE 11th International Conference on Software Testing, Verification and Validation (ICST)", "acronym": "icst", "groupId": "1001832", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "12OmNC8dgqe", "doi": "10.1109/ICST.2018.00054", "title": "Testing Natural Language Grammars", "normalizedTitle": "Testing Natural Language Grammars", "abstract": "Testing grammars has one big difference from testing software: natural language has no formal specification, so ultimately we must involve a human oracle. However, we can automate many useful subtasks: detect ambiguous constructions and contradictory grammar rules, as well as generate minimal and representative set of examples that cover all the constructions. Think of the whole grammar as a haystack, and we suspect there are a few needles-we cannot promise automatic needle-removal, but instead we help the human oracle to narrow down the search.", "abstracts": [ { "abstractType": "Regular", "content": "Testing grammars has one big difference from testing software: natural language has no formal specification, so ultimately we must involve a human oracle. However, we can automate many useful subtasks: detect ambiguous constructions and contradictory grammar rules, as well as generate minimal and representative set of examples that cover all the constructions. Think of the whole grammar as a haystack, and we suspect there are a few needles-we cannot promise automatic needle-removal, but instead we help the human oracle to narrow down the search.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Testing grammars has one big difference from testing software: natural language has no formal specification, so ultimately we must involve a human oracle. 
However, we can automate many useful subtasks: detect ambiguous constructions and contradictory grammar rules, as well as generate minimal and representative set of examples that cover all the constructions. Think of the whole grammar as a haystack, and we suspect there are a few needles-we cannot promise automatic needle-removal, but instead we help the human oracle to narrow down the search.", "fno": "501201a428", "keywords": [ "Context Free Grammars", "Grammars", "Natural Languages", "Program Testing", "Formal Specification", "Human Oracle", "Useful Subtasks", "Ambiguous Constructions", "Contradictory Grammar Rules", "Testing Software", "Natural Language Grammar Testing", "Automatic Needle Removal", "Grammar", "Testing", "Syntactics", "Conferences", "Natural Language Processing", "Grammar Analysis", "Symbolic Evaluation", "Test Case Generation" ], "authors": [ { "affiliation": null, "fullName": "Inari Listenmaa", "givenName": "Inari", "surname": "Listenmaa", "__typename": "ArticleAuthorType" } ], "idPrefix": "icst", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-04-01T00:00:00", "pubType": "proceedings", "pages": "428-429", "year": "2018", "issn": null, "isbn": "978-1-5386-5012-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "501201a426", "articleId": "12OmNBQTJl1", "__typename": "AdjacentArticleType" }, "next": { "fno": "501201a430", "articleId": "12OmNBl6EJe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ialp/2009/3904/0/3904a220", "title": "Improving the Performance of the Link Parser", "doi": null, "abstractUrl": "/proceedings-article/ialp/2009/3904a220/12OmNCgJeav", "parentPublication": { "id": "proceedings/ialp/2009/3904/0", "title": "Asian Language Processing, International Conference on", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/acsc/2000/0518/0/05180024", "title": "Object-Oriented Natural Language Requirements Specification", "doi": null, "abstractUrl": "/proceedings-article/acsc/2000/05180024/12OmNvT2oUC", "parentPublication": { "id": "proceedings/acsc/2000/0518/0", "title": "Australasian Computer Science Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2013/5096/0/5096a309", "title": "A Study of Parsing Process on Natural Language Processing in Bahasa Indonesia", "doi": null, "abstractUrl": "/proceedings-article/cse/2013/5096a309/12OmNxEBz4A", "parentPublication": { "id": "proceedings/cse/2013/5096/0", "title": "2013 IEEE 16th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ds-rt/2011/1643/0/06051794", "title": "A Grammar for Battle Management Language", "doi": null, "abstractUrl": "/proceedings-article/ds-rt/2011/06051794/12OmNxjBfmu", "parentPublication": { "id": "proceedings/ds-rt/2011/1643/0", "title": "2011 IEEE/ACM 15th International Symposium on Distributed Simulation and Real Time Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ialp/2011/4554/0/4554a015", "title": "Natural Language Grammar Induction of Indonesian Language Corpora Using Genetic Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ialp/2011/4554a015/12OmNyprnt4", "parentPublication": { "id": "proceedings/ialp/2011/4554/0", "title": "Asian Language Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2002/01/k0172", "title": "Semiautomatic Acquisition of Semantic Structures for Understanding Domain-Specific Natural Language Queries", "doi": null, "abstractUrl": "/journal/tk/2002/01/k0172/13rRUIM2VBW", 
"parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/1990/02/s2085", "title": "Representing Natural Language with Prolog", "doi": null, "abstractUrl": "/magazine/so/1990/02/s2085/13rRUy0HYPb", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2019/6783/0/08665517", "title": "Controlled Natural Language Framework for Generating Assertions from Hardware Specifications", "doi": null, "abstractUrl": "/proceedings-article/icsc/2019/08665517/18qcdZ7PxBe", "parentPublication": { "id": "proceedings/icsc/2019/6783/0", "title": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2021/0337/0/033700a456", "title": "Learning Highly Recursive Input Grammars", "doi": null, "abstractUrl": "/proceedings-article/ase/2021/033700a456/1AjT214qFcA", "parentPublication": { "id": "proceedings/ase/2021/0337/0", "title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbi/2020/9926/2/09140266", "title": "Semantic search using Natural Language Processing", "doi": null, "abstractUrl": "/proceedings-article/cbi/2020/09140266/1lu6SkF3MME", "parentPublication": { "id": "proceedings/cbi/2020/9926/2", "title": "2020 IEEE 22nd Conference on Business Informatics (CBI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBRbknI", "title": "The Sixth IEEE International Conference on Computer and Information Technology", "acronym": "cit", "groupId": "1001306", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNrFkeRf", "doi": "10.1109/CIT.2006.103", "title": "Improving the Quality of Natural Language Requirements Specifications through Natural Language Requirements Patterns", "normalizedTitle": "Improving the Quality of Natural Language Requirements Specifications through Natural Language Requirements Patterns", "abstract": "This paper presents an approach for reducing the problem of ambiguity and imprecision in natural language requirements specifications with the use of language quality patterns and guiding rules. To ensure the applicability of our approach, we study different sets of requirements documents from several domains. We further validate our approach by rewriting the requirements statements derived from these requirements documents.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents an approach for reducing the problem of ambiguity and imprecision in natural language requirements specifications with the use of language quality patterns and guiding rules. To ensure the applicability of our approach, we study different sets of requirements documents from several domains. We further validate our approach by rewriting the requirements statements derived from these requirements documents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents an approach for reducing the problem of ambiguity and imprecision in natural language requirements specifications with the use of language quality patterns and guiding rules. To ensure the applicability of our approach, we study different sets of requirements documents from several domains. 
We further validate our approach by rewriting the requirements statements derived from these requirements documents.", "fno": "26870199", "keywords": [ "Natural Language Requirements Specifications", "Guiding Rules", "Language Patterns" ], "authors": [ { "affiliation": "University of Nottingham Malaysia Campus, Malaysia", "fullName": "Sri Fatimah Tjong", "givenName": "Sri Fatimah", "surname": "Tjong", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Nottingham Malaysia Campus, Malaysia", "fullName": "Nasreddine Hallam", "givenName": "Nasreddine", "surname": "Hallam", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Nottingham Malaysia Campus, Malaysia", "fullName": "Michael Hartley", "givenName": "Michael", "surname": "Hartley", "__typename": "ArticleAuthorType" } ], "idPrefix": "cit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-09-01T00:00:00", "pubType": "proceedings", "pages": "199", "year": "2006", "issn": null, "isbn": "0-7695-2687-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "26870197", "articleId": "12OmNylKB3T", "__typename": "AdjacentArticleType" }, "next": { "fno": "26870202", "articleId": "12OmNqBbHBM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hicss/1995/6940/0/69400362", "title": "Requirements validation via automated natural language parsing", "doi": null, "abstractUrl": "/proceedings-article/hicss/1995/69400362/12OmNC36tPp", "parentPublication": { "id": "proceedings/hicss/1995/6940/0", "title": "28th Hawaii International Conference on System Sciences (HICSS'95)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icre/2000/0565/0/05650140", "title": "Lightweight Validation of Natural Language Requirements: A Case Study", "doi": null, 
"abstractUrl": "/proceedings-article/icre/2000/05650140/12OmNqJHFzG", "parentPublication": { "id": "proceedings/icre/2000/0565/0", "title": "Requirements Engineering, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/1997/7961/0/79610036", "title": "Processing natural language requirements", "doi": null, "abstractUrl": "/proceedings-article/ase/1997/79610036/12OmNvnOwwv", "parentPublication": { "id": "proceedings/ase/1997/7961/0", "title": "Proceedings 12th IEEE International Conference Automated Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issre/2009/3878/0/3878a185", "title": "Automated Identification of LTL Patterns in Natural Language Requirements", "doi": null, "abstractUrl": "/proceedings-article/issre/2009/3878a185/12OmNxWLTn8", "parentPublication": { "id": "proceedings/issre/2009/3878/0", "title": "2009 20th International Symposium on Software Reliability Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2005/9313/0/01577173", "title": "Formalising a derivation strategy for formal specifications from natural language requirements models", "doi": null, "abstractUrl": "/proceedings-article/isspit/2005/01577173/12OmNxvwoUO", "parentPublication": { "id": "proceedings/isspit/2005/9313/0", "title": "2005 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/swste/2003/2047/0/20470080", "title": "Higher Quality Requirements Specifications through Natural Language Patterns", "doi": null, "abstractUrl": "/proceedings-article/swste/2003/20470080/12OmNyFU72u", "parentPublication": { "id": "proceedings/swste/2003/2047/0", "title": "Software Science, Technology and Engineering, IEEE International Conference on", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2009/3891/0/3891a680", "title": "An Automated Tool for Generating UML Models from Natural Language Requirements", "doi": null, "abstractUrl": "/proceedings-article/ase/2009/3891a680/12OmNyGbIhN", "parentPublication": { "id": "proceedings/ase/2009/3891/0", "title": "2009 IEEE/ACM International Conference on Automated Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/1988/04/s4066", "title": "A Study of 12 Specifications of the Library Problem", "doi": null, "abstractUrl": "/magazine/so/1988/04/s4066/13rRUwbaqSu", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rew/2021/1898/0/189800a039", "title": "Generating Sequence Diagram from Natural Language Requirements", "doi": null, "abstractUrl": "/proceedings-article/rew/2021/189800a039/1y2JLTdJSGQ", "parentPublication": { "id": "proceedings/rew/2021/1898/0", "title": "2021 IEEE 29th International Requirements Engineering Conference Workshops (REW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rew/2021/1898/0/189800a350", "title": "A Natural Language Processing Technique for Formalization of Systems Requirement Specifications", "doi": null, "abstractUrl": "/proceedings-article/rew/2021/189800a350/1y2JRvoK6Ji", "parentPublication": { "id": "proceedings/rew/2021/1898/0", "title": "2021 IEEE 29th International Requirements Engineering Conference Workshops (REW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrFTr62", "title": "2009 IEEE/ACM International Conference on Automated Software Engineering", "acronym": "ase", "groupId": "1000064", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNyGbIhN", "doi": "10.1109/ASE.2009.48", "title": "An Automated Tool for Generating UML Models from Natural Language Requirements", "normalizedTitle": "An Automated Tool for Generating UML Models from Natural Language Requirements", "abstract": "This paper describes a domain independent tool, named, UML Model Generator from Analysis of Requirements (UMGAR), which generates UML models like the Use-case Diagram, Analysis class model, Collaboration diagram and Design class model from natural language requirements using efficient Natural Language Processing (NLP) tools. UMGAR implements a set of syntactic reconstruction rules to process complex requirements into simple requirements. UMGAR also provides a generic XMI parser to generate XMI files for visualizing the generated models in any UML modeling tool. With respect to the existing tools in this area, UMGAR provides more comprehensive support for generating models with proper relationships, which can be used for large requirement documents.", "abstracts": [ { "abstractType": "Regular", "content": "This paper describes a domain independent tool, named, UML Model Generator from Analysis of Requirements (UMGAR), which generates UML models like the Use-case Diagram, Analysis class model, Collaboration diagram and Design class model from natural language requirements using efficient Natural Language Processing (NLP) tools. UMGAR implements a set of syntactic reconstruction rules to process complex requirements into simple requirements. UMGAR also provides a generic XMI parser to generate XMI files for visualizing the generated models in any UML modeling tool. 
With respect to the existing tools in this area, UMGAR provides more comprehensive support for generating models with proper relationships, which can be used for large requirement documents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper describes a domain independent tool, named, UML Model Generator from Analysis of Requirements (UMGAR), which generates UML models like the Use-case Diagram, Analysis class model, Collaboration diagram and Design class model from natural language requirements using efficient Natural Language Processing (NLP) tools. UMGAR implements a set of syntactic reconstruction rules to process complex requirements into simple requirements. UMGAR also provides a generic XMI parser to generate XMI files for visualizing the generated models in any UML modeling tool. With respect to the existing tools in this area, UMGAR provides more comprehensive support for generating models with proper relationships, which can be used for large requirement documents.", "fno": "3891a680", "keywords": [ "Requirement Engineering", "Natural Language Processing", "Unified Modeling Language" ], "authors": [ { "affiliation": null, "fullName": "Deva Kumar Deeptimahanti", "givenName": "Deva Kumar", "surname": "Deeptimahanti", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Muhammad Ali Babar", "givenName": "Muhammad Ali", "surname": "Babar", "__typename": "ArticleAuthorType" } ], "idPrefix": "ase", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-11-01T00:00:00", "pubType": "proceedings", "pages": "680-682", "year": "2009", "issn": "1527-1366", "isbn": "978-0-7695-3891-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3891a677", "articleId": "12OmNyxXlko", "__typename": "AdjacentArticleType" }, "next": { "fno": "3891a683", "articleId": "12OmNAOsMHh", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cerma/2007/2974/0/29740360", "title": "UML Sequence Diagram Generator System from Use Case Description Using Natural Language", "doi": null, "abstractUrl": "/proceedings-article/cerma/2007/29740360/12OmNBkxsvM", "parentPublication": { "id": "proceedings/cerma/2007/2974/0", "title": "2007 2nd Electronics, Robotics and Automotive Mechanics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/enase/2014/9999/0/07077118", "title": "Automated generation of activity and sequence diagrams from natural language requirements", "doi": null, "abstractUrl": "/proceedings-article/enase/2014/07077118/12OmNrEL2DV", "parentPublication": { "id": "proceedings/enase/2014/9999/0", "title": "2014 International Conference on Evaluation of Novel Approaches to Software Engineering (ENASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/enase/2014/9999/0/07077126", "title": "Tool independent code generation for the UML closing the gap between proprietary models and the standardized UML model", "doi": null, "abstractUrl": "/proceedings-article/enase/2014/07077126/12OmNvHoQp9", "parentPublication": { "id": "proceedings/enase/2014/9999/0", "title": "2014 International Conference on Evaluation of Novel Approaches to Software Engineering (ENASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seaa/2013/5091/0/06619501", "title": "Img2UML: A System for Extracting UML Models from Images", "doi": null, "abstractUrl": "/proceedings-article/seaa/2013/06619501/12OmNwFid0f", "parentPublication": { "id": "proceedings/seaa/2013/5091/0", "title": "2013 39th EUROMICRO Conference on Software Engineering and Advanced Applications (SEAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/asea/2008/3432/0/3432a077", "title": "Static UML Model Generator from Analysis of Requirements (SUGAR)", "doi": null, "abstractUrl": "/proceedings-article/asea/2008/3432a077/12OmNwHyZY5", "parentPublication": { "id": "proceedings/asea/2008/3432/0", "title": "Advanced Software Engineering and Its Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2016/3674/0/5830a001", "title": "Automatic Generation of UTP Models from Requirements in Natural Language", "doi": null, "abstractUrl": "/proceedings-article/icstw/2016/5830a001/12OmNwNwzGd", "parentPublication": { "id": "proceedings/icstw/2016/3674/0", "title": "2016 IEEE Ninth International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisa/2013/0602/0/06579469", "title": "UML Component Diagram to Acme Compiler", "doi": null, "abstractUrl": "/proceedings-article/icisa/2013/06579469/12OmNxFsmL1", "parentPublication": { "id": "proceedings/icisa/2013/0602/0", "title": "2013 International Conference on Information Science and Applications (ICISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aire/2015/0125/0/07337625", "title": "From natural language requirements to UML class diagrams", "doi": null, "abstractUrl": "/proceedings-article/aire/2015/07337625/12OmNzICES0", "parentPublication": { "id": "proceedings/aire/2015/0125/0", "title": "2015 IEEE Second International Workshop on Artificial Intelligence for Requirements Engineering (AIRE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rew/2021/1898/0/189800a039", "title": "Generating Sequence Diagram from Natural Language Requirements", "doi": null, "abstractUrl": "/proceedings-article/rew/2021/189800a039/1y2JLTdJSGQ", "parentPublication": { "id": 
"proceedings/rew/2021/1898/0", "title": "2021 IEEE 29th International Requirements Engineering Conference Workshops (REW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/models-c/2021/2484/0/248400a380", "title": "From Prose to Prototype: Synthesising Executable UML Models from Natural Language", "doi": null, "abstractUrl": "/proceedings-article/models-c/2021/248400a380/1zutCPt9eQ8", "parentPublication": { "id": "proceedings/models-c/2021/2484/0", "title": "2021 ACM/IEEE International Conference on Model Driven Engineering Languages and Systems Companion (MODELS-C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "18qcbBot8pq", "title": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", "acronym": "icsc", "groupId": "1001356", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "18qcdZ7PxBe", "doi": "10.1109/ICOSC.2019.8665517", "title": "Controlled Natural Language Framework for Generating Assertions from Hardware Specifications", "normalizedTitle": "Controlled Natural Language Framework for Generating Assertions from Hardware Specifications", "abstract": "In this paper, we present a controlled natural language (CNL) framework for automatic processing and generation of assertions from hardware design specification. Current CNL systems have limitations in mapping differently worded sentences with the same meaning to the same logic structures. We aim to mitigate this limitation by developing a dependency grammar based CNL where the constructed parse tree does not follow strict surface-structure dependencies and instead extract additional relationship based on semantic information that is embedded in the grammar. In addition, current translation schemes for creating executable assertions from hardware design specifications do not provide feedback on wrongly or ambiguously written input sentences. Our natural language understanding algorithm is guided by the dependencies in the parse tree and has the capability to offer useful feedback for sentences that are not fully understood. We reported results on natural language assertions extracted from UART, Memory and AMBA AXI protocol specification documents. We successfully tested syntactic variations of these specifications as well.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a controlled natural language (CNL) framework for automatic processing and generation of assertions from hardware design specification. 
Current CNL systems have limitations in mapping differently worded sentences with the same meaning to the same logic structures. We aim to mitigate this limitation by developing a dependency grammar based CNL where the constructed parse tree does not follow strict surface-structure dependencies and instead extract additional relationship based on semantic information that is embedded in the grammar. In addition, current translation schemes for creating executable assertions from hardware design specifications do not provide feedback on wrongly or ambiguously written input sentences. Our natural language understanding algorithm is guided by the dependencies in the parse tree and has the capability to offer useful feedback for sentences that are not fully understood. We reported results on natural language assertions extracted from UART, Memory and AMBA AXI protocol specification documents. We successfully tested syntactic variations of these specifications as well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a controlled natural language (CNL) framework for automatic processing and generation of assertions from hardware design specification. Current CNL systems have limitations in mapping differently worded sentences with the same meaning to the same logic structures. We aim to mitigate this limitation by developing a dependency grammar based CNL where the constructed parse tree does not follow strict surface-structure dependencies and instead extract additional relationship based on semantic information that is embedded in the grammar. In addition, current translation schemes for creating executable assertions from hardware design specifications do not provide feedback on wrongly or ambiguously written input sentences. Our natural language understanding algorithm is guided by the dependencies in the parse tree and has the capability to offer useful feedback for sentences that are not fully understood. 
We reported results on natural language assertions extracted from UART, Memory and AMBA AXI protocol specification documents. We successfully tested syntactic variations of these specifications as well.", "fno": "08665517", "keywords": [ "Grammars", "Natural Language Processing", "Text Analysis", "Trees Mathematics", "Logic Structures", "Dependency Grammar", "Constructed Parse Tree", "Strict Surface Structure Dependencies", "Executable Assertions", "Hardware Design Specification", "Wrongly Written Input Sentences", "Ambiguously Written Input Sentences", "Natural Language Understanding Algorithm", "Natural Language Assertions", "Controlled Natural Language Framework", "Hardware Specifications", "Automatic Processing", "CNL Systems", "Parse Tree", "Semantics", "Grammar", "Syntactics", "Natural Languages", "Clocks", "Hardware", "Connectors" ], "authors": [ { "affiliation": "Virginia Tech Blacksburg, Department of Electrical and Computer Engineering, Virginia, 24061, USA", "fullName": "Rahul Krishnamurthy", "givenName": "Rahul", "surname": "Krishnamurthy", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech Blacksburg, Department of Electrical and Computer Engineering, Virginia, 24061, USA", "fullName": "Michael S. 
Hsiao", "givenName": "Michael S.", "surname": "Hsiao", "__typename": "ArticleAuthorType" } ], "idPrefix": "icsc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-01-01T00:00:00", "pubType": "proceedings", "pages": "367-370", "year": "2019", "issn": "2325-6516", "isbn": "978-1-5386-6783-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08665630", "articleId": "18qcgo1zEXu", "__typename": "AdjacentArticleType" }, "next": { "fno": "08665509", "articleId": "18qcejvxIGs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iciev/2016/1269/0/07760001", "title": "Bangla grammar pattern recognition using shift reduce parser", "doi": null, "abstractUrl": "/proceedings-article/iciev/2016/07760001/12OmNAlNiKx", "parentPublication": { "id": "proceedings/iciev/2016/1269/0", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2018/5012/0/501201a428", "title": "Testing Natural Language Grammars", "doi": null, "abstractUrl": "/proceedings-article/icst/2018/501201a428/12OmNC8dgqe", "parentPublication": { "id": "proceedings/icst/2018/5012/0", "title": "2018 IEEE 11th International Conference on Software Testing, Verification and Validation (ICST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2010/8333/2/05743475", "title": "Syntax-Directed Machine Translation of Natural Language: Effect of Garden Path Phenomenon on Sentence Structure", "doi": null, "abstractUrl": "/proceedings-article/isdea/2010/05743475/12OmNCdk2MW", "parentPublication": { "id": "proceedings/isdea/2010/8333/2", "title": "2010 International Conference on Intelligent System Design and Engineering 
Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2013/5096/0/5096a309", "title": "A Study of Parsing Process on Natural Language Processing in Bahasa Indonesia", "doi": null, "abstractUrl": "/proceedings-article/cse/2013/5096a309/12OmNxEBz4A", "parentPublication": { "id": "proceedings/cse/2013/5096/0", "title": "2013 IEEE 16th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/naturalise/2013/6271/0/06611716", "title": "Capturing assertions from natural language descriptions", "doi": null, "abstractUrl": "/proceedings-article/naturalise/2013/06611716/12OmNxGj9Ir", "parentPublication": { "id": "proceedings/naturalise/2013/6271/0", "title": "2013 1st International Workshop on Natural Language Analysis in Software Engineering (NaturaLiSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/repa/2014/6328/0/06894838", "title": "Semantic annotation of a formal grammar by SemanticPatterns", "doi": null, "abstractUrl": "/proceedings-article/repa/2014/06894838/12OmNxaeu33", "parentPublication": { "id": "proceedings/repa/2014/6328/0", "title": "2014 IEEE 4th International Workshop on Requirements Patterns (RePa)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ialp/2011/4554/0/4554a015", "title": "Natural Language Grammar Induction of Indonesian Language Corpora Using Genetic Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ialp/2011/4554a015/12OmNyprnt4", "parentPublication": { "id": "proceedings/ialp/2011/4554/0", "title": "Asian Language Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2018/7325/0/732500a017", "title": "High Accuracy Question Answering via Hybrid Controlled 
Natural Language", "doi": null, "abstractUrl": "/proceedings-article/wi/2018/732500a017/17D45VTRowq", "parentPublication": { "id": "proceedings/wi/2018/7325/0", "title": "2018 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2022/9221/0/922100b008", "title": "Fuzzing Class Specifications", "doi": null, "abstractUrl": "/proceedings-article/icse/2022/922100b008/1Ems72xfFOE", "parentPublication": { "id": "proceedings/icse/2022/9221/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccd/2020/9710/0/971000a393", "title": "Transforming Natural Language Specifications to Logical Forms for Hardware Verification", "doi": null, "abstractUrl": "/proceedings-article/iccd/2020/971000a393/1pK5890Y8dW", "parentPublication": { "id": "proceedings/iccd/2020/9710/0", "title": "2020 IEEE 38th International Conference on Computer Design (ICCD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cMFqXmYQRW", "title": "2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE)", "acronym": "icse", "groupId": "1000691", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cMFuexw06Y", "doi": "10.1109/ICSE.2019.00035", "title": "Automatically Generating Precise Oracles from Structured Natural Language Specifications", "normalizedTitle": "Automatically Generating Precise Oracles from Structured Natural Language Specifications", "abstract": "Software specifications often use natural language to describe the desired behavior, but such specifications are difficult to verify automatically. We present Swami, an automated technique that extracts test oracles and generates executable tests from structured natural language specifications. Swami focuses on exceptional behavior and boundary conditions that often cause field failures but that developers often fail to manually write tests for. Evaluated on the official JavaScript specification (ECMA-262), 98.4% of the tests Swami generated were precise to the specification. Using Swami to augment developer-written test suites improved coverage and identified 1 previously unknown defect and 15 missing JavaScript features in Rhino, 1 previously unknown defect in Node.js, and 18 semantic ambiguities in the ECMA-262 specification.", "abstracts": [ { "abstractType": "Regular", "content": "Software specifications often use natural language to describe the desired behavior, but such specifications are difficult to verify automatically. We present Swami, an automated technique that extracts test oracles and generates executable tests from structured natural language specifications. Swami focuses on exceptional behavior and boundary conditions that often cause field failures but that developers often fail to manually write tests for. 
Evaluated on the official JavaScript specification (ECMA-262), 98.4% of the tests Swami generated were precise to the specification. Using Swami to augment developer-written test suites improved coverage and identified 1 previously unknown defect and 15 missing JavaScript features in Rhino, 1 previously unknown defect in Node.js, and 18 semantic ambiguities in the ECMA-262 specification.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Software specifications often use natural language to describe the desired behavior, but such specifications are difficult to verify automatically. We present Swami, an automated technique that extracts test oracles and generates executable tests from structured natural language specifications. Swami focuses on exceptional behavior and boundary conditions that often cause field failures but that developers often fail to manually write tests for. Evaluated on the official JavaScript specification (ECMA-262), 98.4% of the tests Swami generated were precise to the specification. 
Using Swami to augment developer-written test suites improved coverage and identified 1 previously unknown defect and 15 missing JavaScript features in Rhino, 1 previously unknown defect in Node.js, and 18 semantic ambiguities in the ECMA-262 specification.", "fno": "086900a188", "keywords": [ "Formal Specification", "Java", "Natural Language Processing", "Program Testing", "Structured Natural Language Specifications", "Developer Written Test Suites", "ECMA 262 Specification", "Software Specifications", "Test Oracles", "Swami Technique", "Java Script Features", "Java Script Specification", "Natural Languages", "Documentation", "Lenses", "Software", "Boundary Conditions", "Semantics", "Prototypes", "Oracle", "Test Oracle", "Test Generation", "Natural Language Specification" ], "authors": [ { "affiliation": "University of Massachusetts Amherst", "fullName": "Manish Motwani", "givenName": "Manish", "surname": "Motwani", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Massachusetts Amherst", "fullName": "Yuriy Brun", "givenName": "Yuriy", "surname": "Brun", "__typename": "ArticleAuthorType" } ], "idPrefix": "icse", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-05-01T00:00:00", "pubType": "proceedings", "pages": "188-199", "year": "2019", "issn": null, "isbn": "978-1-7281-0869-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "086900a176", "articleId": "1cMFvLkYfoQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "086900a200", "articleId": "1cMFrzFH13G", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/apsec/2017/3681/0/3681a368", "title": "Mining API Type Specifications for JavaScript", "doi": null, "abstractUrl": "/proceedings-article/apsec/2017/3681a368/12OmNApcuCI", "parentPublication": { "id": 
"proceedings/apsec/2017/3681/0", "title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apsec/2003/2011/0/20110322", "title": "Tool Support for Generating Passive C++ Test Oracles from Object-Z Specifications", "doi": null, "abstractUrl": "/proceedings-article/apsec/2003/20110322/12OmNBcShVc", "parentPublication": { "id": "proceedings/apsec/2003/2011/0", "title": "Tenth Asia-Pacific Software Engineering Conference, 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssiri/2009/3758/0/3758a348", "title": "Generating Test Cases for Timed Systems from Controlled Natural Language Specifications", "doi": null, "abstractUrl": "/proceedings-article/ssiri/2009/3758a348/12OmNBtl1E6", "parentPublication": { "id": "proceedings/ssiri/2009/3758/0", "title": "Secure System Integration and Reliability Improvement", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/qsic/2003/2015/0/20150091", "title": "A Note on Test Oracles and Semantics of Algebraic Specifications", "doi": null, "abstractUrl": "/proceedings-article/qsic/2003/20150091/12OmNC4O4Du", "parentPublication": { "id": "proceedings/qsic/2003/2015/0", "title": "Quality Software, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isca/2018/5984/0/598401a247", "title": "ProtoGen: Automatically Generating Directory Cache Coherence Protocols from Atomic Specifications", "doi": null, "abstractUrl": "/proceedings-article/isca/2018/598401a247/12OmNC8uRnK", "parentPublication": { "id": "proceedings/isca/2018/5984/0", "title": "2018 ACM/IEEE 45th Annual International Symposium on Computer Architecture (ISCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/hldvt/2006/0679/0/04110052", "title": "DVGen: Increasing Coverage by Automatically Combining Test Specifications", "doi": null, "abstractUrl": "/proceedings-article/hldvt/2006/04110052/12OmNzWx00I", "parentPublication": { "id": "proceedings/hldvt/2006/0679/0", "title": "2006 IEEE International High Level Design Validation and Test Workshop", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dysdoc3/2018/7570/0/757000a001", "title": "Automatically Generating Natural Language Documentation for Methods", "doi": null, "abstractUrl": "/proceedings-article/dysdoc3/2018/757000a001/17D45Xh13pW", "parentPublication": { "id": "proceedings/dysdoc3/2018/7570/0", "title": "2018 IEEE Third International Workshop on Dynamic Software Documentation (DySDoc3)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2019/6783/0/08665517", "title": "Controlled Natural Language Framework for Generating Assertions from Hardware Specifications", "doi": null, "abstractUrl": "/proceedings-article/icsc/2019/08665517/18qcdZ7PxBe", "parentPublication": { "id": "proceedings/icsc/2019/6783/0", "title": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2021/0337/0/033700a606", "title": "JSTAR: JavaScript Specification Type Analyzer using Refinement", "doi": null, "abstractUrl": "/proceedings-article/ase/2021/033700a606/1AjT8HGq7Je", "parentPublication": { "id": "proceedings/ase/2021/0337/0", "title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2022/9598/0/959800a056", "title": "DScribe: Co-generating Unit Tests and Documentation", "doi": null, "abstractUrl": 
"/proceedings-article/icse-companion/2022/959800a056/1EaPe2RJ5hC", "parentPublication": { "id": "proceedings/icse-companion/2022/9598/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxvO07y", "title": "Visual Languages, IEEE Symposium on", "acronym": "vl", "groupId": "1000793", "volume": "0", "displayVolume": "0", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNwErpKe", "doi": "10.1109/VL.1995.520798", "title": "Integrating algorithm animation into a declarative visual programming language", "normalizedTitle": "Integrating algorithm animation into a declarative visual programming language", "abstract": "Until now, only users of textual programming languages have enjoyed the fruits of algorithm animation. Users of visual programming languages (VPLs) have been deprived of the unique semantic insights algorithm animation offers. To begin solving this shortcoming, we have seamlessly integrated algorithm animation capabilities into the VPL Forms/3. Our research shows how a declarative VPL that is responsive can provide features not found in other algorithm animation systems.", "abstracts": [ { "abstractType": "Regular", "content": "Until now, only users of textual programming languages have enjoyed the fruits of algorithm animation. Users of visual programming languages (VPLs) have been deprived of the unique semantic insights algorithm animation offers. To begin solving this shortcoming, we have seamlessly integrated algorithm animation capabilities into the VPL Forms/3. Our research shows how a declarative VPL that is responsive can provide features not found in other algorithm animation systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Until now, only users of textual programming languages have enjoyed the fruits of algorithm animation. Users of visual programming languages (VPLs) have been deprived of the unique semantic insights algorithm animation offers. To begin solving this shortcoming, we have seamlessly integrated algorithm animation capabilities into the VPL Forms/3. 
Our research shows how a declarative VPL that is responsive can provide features not found in other algorithm animation systems.", "fno": "70450126", "keywords": [ "Computer Animation Visual Languages Computational Linguistics Visual Programming Algorithm Animation Declarative Visual Programming Language Semantic Insights VPL Forms 3" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Oregon State Univ., Corvallis, OR, USA", "fullName": "P. Carlson", "givenName": "P.", "surname": "Carlson", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Oregon State Univ., Corvallis, OR, USA", "fullName": "M.M. Burnett", "givenName": "M.M.", "surname": "Burnett", "__typename": "ArticleAuthorType" } ], "idPrefix": "vl", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-09-01T00:00:00", "pubType": "proceedings", "pages": "126", "year": "1995", "issn": "1049-2615", "isbn": "0-8186-7045-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "70450122", "articleId": "12OmNBzRNqS", "__typename": "AdjacentArticleType" }, "next": { "fno": "70450128", "articleId": "12OmNvo67BJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "1rSR7vfukX6", "title": "2020 24th International Conference Information Visualisation (IV)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1rSRdbyf9bG", "doi": "10.1109/IV51561.2020.00065", "title": "Comparison of Full-text Articles and Abstracts for Visual Trend Analytics through Natural Language Processing", "normalizedTitle": "Comparison of Full-text Articles and Abstracts for Visual Trend Analytics through Natural Language Processing", "abstract": "Scientific publications are an essential resource for detecting emerging trends and innovations in a very early stage, by far earlier than patents may allow. Thereby Visual Analytics systems enable a deep analysis by applying commonly unsupervised machine learning methods and investigating a mass amount of data. A main question from the Visual Analytics viewpoint in this context is, do abstracts of scientific publications provide a similar analysis capability compared to their corresponding full-texts? This would allow to extract a mass amount of text documents in a much faster manner. We compare in this paper the topic extraction methods LSI and LDA by using full text articles and their corresponding abstracts to obtain which method and which data are better suited for a Visual Analytics system for Technology and Corporate Foresight. Based on a easy replicable natural language processing approach, we further investigate the impact of lemmatization for LDA and LSI. The comparison will be performed qualitative and quantitative to gather both, the human perception in visual systems and coherence values. 
Based on an application scenario a visual trend analytics system illustrates the outcomes.", "abstracts": [ { "abstractType": "Regular", "content": "Scientific publications are an essential resource for detecting emerging trends and innovations in a very early stage, by far earlier than patents may allow. Thereby Visual Analytics systems enable a deep analysis by applying commonly unsupervised machine learning methods and investigating a mass amount of data. A main question from the Visual Analytics viewpoint in this context is, do abstracts of scientific publications provide a similar analysis capability compared to their corresponding full-texts? This would allow to extract a mass amount of text documents in a much faster manner. We compare in this paper the topic extraction methods LSI and LDA by using full text articles and their corresponding abstracts to obtain which method and which data are better suited for a Visual Analytics system for Technology and Corporate Foresight. Based on a easy replicable natural language processing approach, we further investigate the impact of lemmatization for LDA and LSI. The comparison will be performed qualitative and quantitative to gather both, the human perception in visual systems and coherence values. Based on an application scenario a visual trend analytics system illustrates the outcomes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scientific publications are an essential resource for detecting emerging trends and innovations in a very early stage, by far earlier than patents may allow. Thereby Visual Analytics systems enable a deep analysis by applying commonly unsupervised machine learning methods and investigating a mass amount of data. A main question from the Visual Analytics viewpoint in this context is, do abstracts of scientific publications provide a similar analysis capability compared to their corresponding full-texts? 
This would allow to extract a mass amount of text documents in a much faster manner. We compare in this paper the topic extraction methods LSI and LDA by using full text articles and their corresponding abstracts to obtain which method and which data are better suited for a Visual Analytics system for Technology and Corporate Foresight. Based on a easy replicable natural language processing approach, we further investigate the impact of lemmatization for LDA and LSI. The comparison will be performed qualitative and quantitative to gather both, the human perception in visual systems and coherence values. Based on an application scenario a visual trend analytics system illustrates the outcomes.", "fno": "913400a360", "keywords": [ "Data Analysis", "Data Mining", "Data Visualisation", "Feature Extraction", "Information Retrieval", "Learning Artificial Intelligence", "Natural Language Processing", "Text Analysis", "Unsupervised Learning", "Corresponding Full Texts", "Text Documents", "Corresponding Abstracts", "Easy Replicable Natural Language Processing Approach", "Visual Systems", "Coherence Values", "Visual Trend Analytics System", "Similar Analysis Capability", "Visual Analytics Viewpoint", "Machine Learning Methods", "Deep Analysis", "Visual Analytics System", "Detecting Emerging Trends", "Essential Resource", "Scientific Publications", "Full Text Articles", "Information Science", "Visual Analytics", "Coherence", "Market Research", "Large Scale Integration", "Natural Language Processing", "Libraries", "Visual Analytics", "Data Science", "Natural Language Processing", "Visual Trend Analytics" ], "authors": [ { "affiliation": "Darmstadt University of Applied Sciences,Human-Computer Interaction & Visual Analytics,Darmstadt,Germany", "fullName": "Kawa Nazemi", "givenName": "Kawa", "surname": "Nazemi", "__typename": "ArticleAuthorType" }, { "affiliation": "Darmstadt University of Applied Sciences,Human-Computer Interaction & Visual Analytics,Darmstadt,Germany", 
"fullName": "Maike J. Klepsch", "givenName": "Maike J.", "surname": "Klepsch", "__typename": "ArticleAuthorType" }, { "affiliation": "Darmstadt University of Applied Sciences,Human-Computer Interaction & Visual Analytics,Darmstadt,Germany", "fullName": "Dirk Burkhardt", "givenName": "Dirk", "surname": "Burkhardt", "__typename": "ArticleAuthorType" }, { "affiliation": "Darmstadt University of Applied Sciences,Human-Computer Interaction & Visual Analytics,Darmstadt,Germany", "fullName": "Lukas Kaupp", "givenName": "Lukas", "surname": "Kaupp", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-09-01T00:00:00", "pubType": "proceedings", "pages": "360-367", "year": "2020", "issn": null, "isbn": "978-1-7281-9134-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "913400a350", "articleId": "1rSReicCeD6", "__typename": "AdjacentArticleType" }, "next": { "fno": "913400a368", "articleId": "1rSRaA2LJBK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hicss/2013/4892/0/4892c416", "title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2011/9618/0/05718616", "title": "Pair Analytics: Capturing Reasoning Processes in Collaborative Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/hicss/2011/05718616/12OmNvAiShB", "parentPublication": { "id": "proceedings/hicss/2011/9618/0", "title": "2011 44th Hawaii International Conference on 
System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2009/3733/0/3733a197", "title": "A Visualization Approach for Frauds Detection in Financial Market", "doi": null, "abstractUrl": "/proceedings-article/iv/2009/3733a197/12OmNvnOwqz", "parentPublication": { "id": "proceedings/iv/2009/3733/0", "title": "2009 13th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2012/4925/0/4925a894", "title": "Cubix: A Visual Analytics Tool for Conceptual and Semantic Data", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2012/4925a894/12OmNz5JCf2", "parentPublication": { "id": "proceedings/icdmw/2012/4925/0", "title": "2012 IEEE 12th International Conference on Data Mining Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/01/mcg2008010018", "title": "An Information-Theoretic View of Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2008/01/mcg2008010018/13rRUB6SpRW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013121992", "title": "UTOPIAN: User-Driven Topic Modeling Based on Interactive Nonnegative Matrix Factorization", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013121992/13rRUIIVlcI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019833", "title": "Applying Pragmatics Principles for Interaction with Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019833/13rRUNvgz9X", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/08/mco2013080090", "title": "Bixplorer: Visual Analytics with Biclusters", "doi": null, "abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122869", "title": "Examining the Use of a Visual Analytics System for Sensemaking Tasks: Case Studies with Domain Experts", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122869/13rRUxNmPDT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222269", "title": "LineSmooth: An Analytical Framework for Evaluating the Effectiveness of Smoothing Techniques on Line Charts", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222269/1nTrpaFo2f6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirs", "title": "2018 7th Brazilian Conference on Intelligent Systems (BRACIS)", "acronym": "bracis", "groupId": "1803430", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45VTRoBx", "doi": "10.1109/BRACIS.2018.00029", "title": "Bandit-Based Automated Machine Learning", "normalizedTitle": "Bandit-Based Automated Machine Learning", "abstract": "Machine Learning (ML) has been successfully applied to a wide range of domains and applications. Since the number of ML applications is growing, there is a need for tools that boost the data scientist's productivity. Automated Machine Learning (AutoML) is the field of ML that aims to address these needs through the development of solutions which enable data science practitioners, experts and non-experts, to efficiently create fine-tuned predictive models with minimum intervention. In this paper, we present the application of the multi-armed bandit optimization algorithm Hyperband to address the AutoML problem of generating customized classification workflows, a combination of preprocessing methods and ML algorithms including hyperparameter optimization. Experimental results comparing the bandit-based approach against Auto ML Bayesian Optimization methods show that this new approach is superior to the state-of-the-art methods in the test evaluation and equivalent to them in a statistical analysis.", "abstracts": [ { "abstractType": "Regular", "content": "Machine Learning (ML) has been successfully applied to a wide range of domains and applications. Since the number of ML applications is growing, there is a need for tools that boost the data scientist's productivity. Automated Machine Learning (AutoML) is the field of ML that aims to address these needs through the development of solutions which enable data science practitioners, experts and non-experts, to efficiently create fine-tuned predictive models with minimum intervention. 
In this paper, we present the application of the multi-armed bandit optimization algorithm Hyperband to address the AutoML problem of generating customized classification workflows, a combination of preprocessing methods and ML algorithms including hyperparameter optimization. Experimental results comparing the bandit-based approach against Auto ML Bayesian Optimization methods show that this new approach is superior to the state-of-the-art methods in the test evaluation and equivalent to them in a statistical analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Machine Learning (ML) has been successfully applied to a wide range of domains and applications. Since the number of ML applications is growing, there is a need for tools that boost the data scientist's productivity. Automated Machine Learning (AutoML) is the field of ML that aims to address these needs through the development of solutions which enable data science practitioners, experts and non-experts, to efficiently create fine-tuned predictive models with minimum intervention. In this paper, we present the application of the multi-armed bandit optimization algorithm Hyperband to address the AutoML problem of generating customized classification workflows, a combination of preprocessing methods and ML algorithms including hyperparameter optimization. 
Experimental results comparing the bandit-based approach against Auto ML Bayesian Optimization methods show that this new approach is superior to the state-of-the-art methods in the test evaluation and equivalent to them in a statistical analysis.", "fno": "802300a121", "keywords": [ "Bayes Methods", "Learning Artificial Intelligence", "Optimisation", "Statistical Analysis", "Fine Tuned Predictive Models", "Auto ML Problem", "Customized Classification Workflows", "Statistical Analysis", "Auto ML Bayesian Optimization Methods", "Multiarmed Bandit Optimization Algorithm Hyperband", "Bandit Based Automated Machine Learning", "Hyperparameter Optimization", "Optimization", "Machine Learning Algorithms", "Machine Learning", "Bayes Methods", "Feature Extraction", "Task Analysis", "Search Problems", "Automl", "Autoband", "Workflow Selection", "Machine Learning" ], "authors": [ { "affiliation": null, "fullName": "Silvia Cristina Nunes das Dôres", "givenName": "Silvia Cristina Nunes", "surname": "das Dôres", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Carlos Soares", "givenName": "Carlos", "surname": "Soares", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Duncan Ruiz", "givenName": "Duncan", "surname": "Ruiz", "__typename": "ArticleAuthorType" } ], "idPrefix": "bracis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "121-126", "year": "2018", "issn": null, "isbn": "978-1-5386-8023-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "802300a115", "articleId": "17D45VTRoA8", "__typename": "AdjacentArticleType" }, "next": { "fno": "802300a127", "articleId": "17D45WYQJ5s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icsme/2022/7956/0/795600a059", "title": 
"An Empirical Study on the Usage of Automated Machine Learning Tools", "doi": null, "abstractUrl": "/proceedings-article/icsme/2022/795600a059/1JeFkmlWKFq", "parentPublication": { "id": "proceedings/icsme/2022/7956/0", "title": "2022 IEEE International Conference on Software Maintenance and Evolution (ICSME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icebe/2022/9244/0/924400a094", "title": "Automated Machine Learning for Steel Production: A Case Study of TPOT for Material Mechanical Property Prediction", "doi": null, "abstractUrl": "/proceedings-article/icebe/2022/924400a094/1KzzfZzw0Ss", "parentPublication": { "id": "proceedings/icebe/2022/9244/0", "title": "2022 IEEE International Conference on e-Business Engineering (ICEBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09005963", "title": "Modeling and Forecasting Armed Conflict: AutoML with Human-Guided Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09005963/1hJs1JWYiOY", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2019/3798/0/379800b471", "title": "Towards Automated Machine Learning: Evaluation and Comparison of AutoML Approaches and Tools", "doi": null, "abstractUrl": "/proceedings-article/ictai/2019/379800b471/1hrLRPyQ8co", "parentPublication": { "id": "proceedings/ictai/2019/3798/0", "title": "2019 IEEE 31st International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222086", "title": "<italic>PipelineProfiler:</italic> A Visual Analytics Tool for the Exploration of AutoML Pipelines", "doi": null, "abstractUrl": 
"/journal/tg/2021/02/09222086/1nTrpup4LZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisce/2020/9761/0/976100a474", "title": "Analysis on Approaches and Structures of Automated Machine Learning Frameworks", "doi": null, "abstractUrl": "/proceedings-article/cisce/2020/976100a474/1oUCUVTOiVG", "parentPublication": { "id": "proceedings/cisce/2020/9761/0", "title": "2020 International Conference on Communications, Information System and Computer Engineering (CISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/09/09366792", "title": "Adaptation Strategies for Automated Machine Learning on Evolving Data", "doi": null, "abstractUrl": "/journal/tp/2021/09/09366792/1rDQNoFcVnq", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcs/2020/7002/0/700200b215", "title": "D-SmartML: A Distributed Automated Machine Learning Framework", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2020/700200b215/1rsiPv0Mn9m", "parentPublication": { "id": "proceedings/icdcs/2020/7002/0", "title": "2020 IEEE 40th International Conference on Distributed Computing Systems (ICDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/09/09388886", "title": "Evolving Fully Automated Machine Learning via Life-Long Knowledge Anchors", "doi": null, "abstractUrl": "/journal/tp/2021/09/09388886/1smZJB1wYog", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pact/2021/4278/0/427800a001", "title": "A 
Flexible Approach to Autotuning Multi-Pass Machine Learning Compilers", "doi": null, "abstractUrl": "/proceedings-article/pact/2021/427800a001/1xNNt84QqWI", "parentPublication": { "id": "proceedings/pact/2021/4278/0", "title": "2021 30th International Conference on Parallel Architectures and Compilation Techniques (PACT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1AjSCovc0wM", "title": "2021 International Conference on Data Mining Workshops (ICDMW)", "acronym": "icdmw", "groupId": "1001620", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1AjSENasAfK", "doi": "10.1109/ICDMW53433.2021.00037", "title": "Stochastic Schemata Exploiter-Based AutoML", "normalizedTitle": "Stochastic Schemata Exploiter-Based AutoML", "abstract": "Machine Learning (ML) models are often determined by relying on knowledge and experience. In recent years, many automatic building methods are proposed, but there are some problems related to accuracy, computation cost, and explainability. We propose the Stochastic Schemata Exploiter-Based AutoML. The Stochastic Schemata Exploiter (SSE) is one of the Evolutionary Algorithms. In each generation, SSE calculates fitness of the individuals, defines the best subsets according to their average fitness, generates individuals based on the subsets, and applies a mutation operator. Since the original SSE uses a binary string representation, we have to modify the SSE algorithm in the following points for parameter optimization: the initialization method, the schema extraction method, the new individual generation method, the mutation method, and the generation update method. In this paper, we propose a genetic representation of the stacking model and optimize the stacking model using SSE. Compared with the Genetic Algorithm, the Tree-structured Parzen Estimator, the Covariance Matrix Adaptation - Evolution Strategy, and Random Search, SSE shows an interesting feature: a better accuracy for combinatorial optimization problems with categorical, discrete, and continuous variables, such as hyper-parameter optimization. In addition, we propose the visualization of the process of SSE. 
The visualization helps us to understand the process, which is another advantage of the SSE-based optimization (SSEopt).", "abstracts": [ { "abstractType": "Regular", "content": "Machine Learning (ML) models are often determined by relying on knowledge and experience. In recent years, many automatic building methods are proposed, but there are some problems related to accuracy, computation cost, and explainability. We propose the Stochastic Schemata Exploiter-Based AutoML. The Stochastic Schemata Exploiter (SSE) is one of the Evolutionary Algorithms. In each generation, SSE calculates fitness of the individuals, defines the best subsets according to their average fitness, generates individuals based on the subsets, and applies a mutation operator. Since the original SSE uses a binary string representation, we have to modify the SSE algorithm in the following points for parameter optimization: the initialization method, the schema extraction method, the new individual generation method, the mutation method, and the generation update method. In this paper, we propose a genetic representation of the stacking model and optimize the stacking model using SSE. Compared with the Genetic Algorithm, the Tree-structured Parzen Estimator, the Covariance Matrix Adaptation - Evolution Strategy, and Random Search, SSE shows an interesting feature: a better accuracy for combinatorial optimization problems with categorical, discrete, and continuous variables, such as hyper-parameter optimization. In addition, we propose the visualization of the process of SSE. The visualization helps us to understand the process, which is another advantage of the SSE-based optimization (SSEopt).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Machine Learning (ML) models are often determined by relying on knowledge and experience. In recent years, many automatic building methods are proposed, but there are some problems related to accuracy, computation cost, and explainability. 
We propose the Stochastic Schemata Exploiter-Based AutoML. The Stochastic Schemata Exploiter (SSE) is one of the Evolutionary Algorithms. In each generation, SSE calculates fitness of the individuals, defines the best subsets according to their average fitness, generates individuals based on the subsets, and applies a mutation operator. Since the original SSE uses a binary string representation, we have to modify the SSE algorithm in the following points for parameter optimization: the initialization method, the schema extraction method, the new individual generation method, the mutation method, and the generation update method. In this paper, we propose a genetic representation of the stacking model and optimize the stacking model using SSE. Compared with the Genetic Algorithm, the Tree-structured Parzen Estimator, the Covariance Matrix Adaptation - Evolution Strategy, and Random Search, SSE shows an interesting feature: a better accuracy for combinatorial optimization problems with categorical, discrete, and continuous variables, such as hyper-parameter optimization. In addition, we propose the visualization of the process of SSE. 
The visualization helps us to understand the process, which is another advantage of the SSE-based optimization (SSEopt).", "fno": "242700a238", "keywords": [ "Adaptation Models", "Stacking", "Time Series Analysis", "Stochastic Processes", "Machine Learning", "Search Problems", "Data Models", "Auto ML", "Hyper Parameter Optimization", "Stacking", "Stochastic Schemata Exploiter", "Evolutionary Algorithm", "Explainable AI" ], "authors": [ { "affiliation": "Nagoya University,Graduate School of Informatics,Nagoya,Japan", "fullName": "Hiroya Makino", "givenName": "Hiroya", "surname": "Makino", "__typename": "ArticleAuthorType" }, { "affiliation": "Nagoya University,Graduate School of Informatics,Nagoya,Japan", "fullName": "Eisuke Kita", "givenName": "Eisuke", "surname": "Kita", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdmw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-12-01T00:00:00", "pubType": "proceedings", "pages": "238-245", "year": "2021", "issn": null, "isbn": "978-1-6654-2427-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "242700a228", "articleId": "1AjSFXrmnAc", "__typename": "AdjacentArticleType" }, "next": { "fno": "242700a246", "articleId": "1AjSMR7KCt2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hpcc/2016/4297/0/07828474", "title": "A Workload Prediction Approach Using Models Stacking Based on Recurrent Neural Network and Autoencoder", "doi": null, "abstractUrl": "/proceedings-article/hpcc/2016/07828474/12OmNBWi6IS", "parentPublication": { "id": "proceedings/hpcc/2016/4297/0", "title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsi/2017/2820/0/2820a190", "title": "Acceleration of Grammatical Evolution with Multiple Chromosome by Using Stochastic Schemata Exploiter", "doi": null, "abstractUrl": "/proceedings-article/mcsi/2017/2820a190/12OmNCd2rW7", "parentPublication": { "id": "proceedings/mcsi/2017/2820/0", "title": "2017 Fourth International Conference on Mathematics and Computers in Sciences and in Industry (MCSI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wsc/2006/0500/0/04117611", "title": "A Comparison of Sample-Path-Based Simulation-Optimization and Stochastic Decomposition for Multi-Location Transshipment Problems", "doi": null, "abstractUrl": "/proceedings-article/wsc/2006/04117611/12OmNvDqsS0", "parentPublication": { "id": "proceedings/wsc/2006/0500/0", "title": "2006 Winter Simulation Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cicn/2010/4254/0/4254a628", "title": "Evolutionary Algorithms for Solving Stochastic Programming Problems", "doi": null, "abstractUrl": "/proceedings-article/cicn/2010/4254a628/12OmNxy4N5i", "parentPublication": { "id": "proceedings/cicn/2010/4254/0", "title": "Computational Intelligence and Communication Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dfm/2012/4954/0/4954a001", "title": "Determinacy and Repeatability of Parallel Program Schemata", "doi": null, "abstractUrl": "/proceedings-article/dfm/2012/4954a001/12OmNyUFg3d", "parentPublication": { "id": "proceedings/dfm/2012/4954/0", "title": "2012 Data-Flow Execution Models for Extreme Scale Computing (DFM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/1986/0655/0/07266272", "title": "On the power to derive external schemata 
from the database schema", "doi": null, "abstractUrl": "/proceedings-article/icde/1986/07266272/12OmNzzP5Aj", "parentPublication": { "id": "proceedings/icde/1986/0655/0", "title": "1986 IEEE Second International Conference on Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1975/12/01672758", "title": "Stochastic Syntax-Directed Translation Schemata for Correction of Errors in Context-Free Languages", "doi": null, "abstractUrl": "/journal/tc/1975/12/01672758/13rRUIJcWvL", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaice/2021/2186/0/218600a623", "title": "Application of Stacking ensemble learning in option implied volatility", "doi": null, "abstractUrl": "/proceedings-article/icaice/2021/218600a623/1Et4GjvxHKE", "parentPublication": { "id": "proceedings/icaice/2021/2186/0", "title": "2021 2nd International Conference on Artificial Intelligence and Computer Engineering (ICAICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icemme/2020/9144/0/914400a992", "title": "Default Identification of P2P Lending Based on Stacking Ensemble Learning", "doi": null, "abstractUrl": "/proceedings-article/icemme/2020/914400a992/1tV9qS30nxS", "parentPublication": { "id": "proceedings/icemme/2020/9144/0", "title": "2020 2nd International Conference on Economic Management and Model Engineering (ICEMME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413168", "title": "Comparison of Stacking-based Classifier Ensembles using Euclidean and Riemannian Geometries", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413168/1tmjPxL5rUs", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern 
Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J4CiR0iIBa", "title": "2022 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)", "acronym": "dasc-picom-cbdcom-cyberscitech", "groupId": "9927523", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J4CxIUBcQw", "doi": "10.1109/DASC/PiCom/CBDCom/Cy55231.2022.9927837", "title": "DataXc: Flexible and efficient communication in microservices-based stream analytics pipelines", "normalizedTitle": "DataXc: Flexible and efficient communication in microservices-based stream analytics pipelines", "abstract": "A big challenge in changing a monolithic application into a performant microservices-based application is the design of efficient mechanisms for microservices to communicate with each other. Prior proposals range from custom point-to-point communication among microservices using protocols like gRPC to service meshes like Linkerd to a flexible, many-to-many communication using broker-based messaging systems like NATS. We propose a new communication mechanism, DataXc, that is more efficient than prior proposals in terms of message latency, jitter, message processing rate and use of network resources. To the best of our knowledge, DataXc is the first communication design that has the desirable flexibility of a broker-based messaging systems like NATS and the high-performance of a rigid, custom point-to-point communication method.DataXc proposes a novel \"pull\" based communication method (i.e consumers fetch messages from producers). This is unlike prior proposals like NATS, gRPC or Linkerd, all of which are \"push\" based (i.e. producers send messages to consumers). Such communication methods make it difficult to take advantage of differential processing rates of consumers like video analytics tasks. 
In contrast, DataXc proposes a \"pull\" based design that avoids unnecessary communication of messages that are eventually discarded by the consumers. Also, unlike prior proposals, DataXc successfully addresses several key challenges in streaming video analytics pipelines like non-uniform processing of frames from multiple cameras, and high variance in latency of frames processed by consumers, all of which adversely affect the quality of insights from streaming video analytics.We report results on two popular real-world, streaming video analytics pipelines (video surveillance, and video action recognition). Compared to NATS, DataXc is just as flexible, but it has far superior performance: upto 80% higher processing rate, 3X lower latency, 7.5X lower jitter and 4.5X lower network bandwidth usage. Compared to gRPC or Linkerd, DataXc is highly flexible, achieves up to 2X higher processing rate, lower latency and lower jitter, but it also consumes more network bandwidth.", "abstracts": [ { "abstractType": "Regular", "content": "A big challenge in changing a monolithic application into a performant microservices-based application is the design of efficient mechanisms for microservices to communicate with each other. Prior proposals range from custom point-to-point communication among microservices using protocols like gRPC to service meshes like Linkerd to a flexible, many-to-many communication using broker-based messaging systems like NATS. We propose a new communication mechanism, DataXc, that is more efficient than prior proposals in terms of message latency, jitter, message processing rate and use of network resources. To the best of our knowledge, DataXc is the first communication design that has the desirable flexibility of a broker-based messaging systems like NATS and the high-performance of a rigid, custom point-to-point communication method.DataXc proposes a novel \"pull\" based communication method (i.e consumers fetch messages from producers). 
This is unlike prior proposals like NATS, gRPC or Linkerd, all of which are \"push\" based (i.e. producers send messages to consumers). Such communication methods make it difficult to take advantage of differential processing rates of consumers like video analytics tasks. In contrast, DataXc proposes a \"pull\" based design that avoids unnecessary communication of messages that are eventually discarded by the consumers. Also, unlike prior proposals, DataXc successfully addresses several key challenges in streaming video analytics pipelines like non-uniform processing of frames from multiple cameras, and high variance in latency of frames processed by consumers, all of which adversely affect the quality of insights from streaming video analytics.We report results on two popular real-world, streaming video analytics pipelines (video surveillance, and video action recognition). Compared to NATS, DataXc is just as flexible, but it has far superior performance: upto 80% higher processing rate, 3X lower latency, 7.5X lower jitter and 4.5X lower network bandwidth usage. Compared to gRPC or Linkerd, DataXc is highly flexible, achieves up to 2X higher processing rate, lower latency and lower jitter, but it also consumes more network bandwidth.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A big challenge in changing a monolithic application into a performant microservices-based application is the design of efficient mechanisms for microservices to communicate with each other. Prior proposals range from custom point-to-point communication among microservices using protocols like gRPC to service meshes like Linkerd to a flexible, many-to-many communication using broker-based messaging systems like NATS. We propose a new communication mechanism, DataXc, that is more efficient than prior proposals in terms of message latency, jitter, message processing rate and use of network resources. 
To the best of our knowledge, DataXc is the first communication design that has the desirable flexibility of a broker-based messaging systems like NATS and the high-performance of a rigid, custom point-to-point communication method.DataXc proposes a novel \"pull\" based communication method (i.e consumers fetch messages from producers). This is unlike prior proposals like NATS, gRPC or Linkerd, all of which are \"push\" based (i.e. producers send messages to consumers). Such communication methods make it difficult to take advantage of differential processing rates of consumers like video analytics tasks. In contrast, DataXc proposes a \"pull\" based design that avoids unnecessary communication of messages that are eventually discarded by the consumers. Also, unlike prior proposals, DataXc successfully addresses several key challenges in streaming video analytics pipelines like non-uniform processing of frames from multiple cameras, and high variance in latency of frames processed by consumers, all of which adversely affect the quality of insights from streaming video analytics.We report results on two popular real-world, streaming video analytics pipelines (video surveillance, and video action recognition). Compared to NATS, DataXc is just as flexible, but it has far superior performance: upto 80% higher processing rate, 3X lower latency, 7.5X lower jitter and 4.5X lower network bandwidth usage. 
Compared to gRPC or Linkerd, DataXc is highly flexible, achieves up to 2X higher processing rate, lower latency and lower jitter, but it also consumes more network bandwidth.", "fno": "09927837", "keywords": [ "Image Motion Analysis", "Internet", "Jitter", "Message Passing", "Parallel Programming", "Video Signal Processing", "Video Surveillance", "2 X Higher Processing Rate", "Based Communication Method", "Broker Based Messaging Systems", "Communication Design", "Communication Mechanism", "Communication Methods", "Custom Point To Point Communication Method Data Xc", "Desirable Flexibility", "Differential Processing Rates", "Efficient Mechanisms", "Flexible Communication", "G RPC", "Linkerd", "Message Processing Rate", "Microservices Based Stream Analytics Pipelines", "NATS", "Performant Microservices Based Application", "Prior Proposals", "Pull Based Design", "Rigid Point To Point Communication Method Data Xc", "Unnecessary Communication", "Upto 80 Higher Processing Rate", "Video Analytics Pipelines", "Video Analytics Tasks", "Visual Analytics", "Pipelines", "Microservice Architectures", "Bandwidth", "Jitter", "Streaming Media", "Video Surveillance", "Microservices", "Video Analytics", "Real Time", "Communication", "G RPC", "Linkerd", "NATS", "Data X" ], "authors": [ { "affiliation": "NEC Laboratories America, Inc.,Princeton,NJ", "fullName": "Giuseppe Coviello", "givenName": "Giuseppe", "surname": "Coviello", "__typename": "ArticleAuthorType" }, { "affiliation": "NEC Laboratories America, Inc.,Princeton,NJ", "fullName": "Kunal Rao", "givenName": "Kunal", "surname": "Rao", "__typename": "ArticleAuthorType" }, { "affiliation": "NEC Laboratories America, Inc.,Princeton,NJ", "fullName": "Ciro Giuseppe De Vita", "givenName": "Ciro Giuseppe", "surname": "De Vita", "__typename": "ArticleAuthorType" }, { "affiliation": "NEC Laboratories America, Inc.,Princeton,NJ", "fullName": "Gennaro Mellone", "givenName": "Gennaro", "surname": "Mellone", "__typename": 
"ArticleAuthorType" }, { "affiliation": "NEC Laboratories America, Inc.,Princeton,NJ", "fullName": "Srimat Chakradhar", "givenName": "Srimat", "surname": "Chakradhar", "__typename": "ArticleAuthorType" } ], "idPrefix": "dasc-picom-cbdcom-cyberscitech", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-09-01T00:00:00", "pubType": "proceedings", "pages": "1-9", "year": "2022", "issn": null, "isbn": "978-1-6654-6297-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09927999", "articleId": "1J4CDaJfbwY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09927898", "articleId": "1J4Cty6oXPq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/tg/2011/04/ttg2011040440", "title": "Forecasting Hotspots—A Predictive Analytics Approach", "doi": null, "abstractUrl": "/journal/tg/2011/04/ttg2011040440/13rRUwdrdSv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2021/3902/0/09671978", "title": "MI-OPJ: A Microservices-based Online Programming Judge", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671978/1A8gvWzULCw", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2021/3902/0/09671382", "title": "A Microservices Based Architecture for Implementing and Automating ETL Data Pipelines for Mobile Crowdsensing Applications", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671382/1A8j9uHglDa", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE 
International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sec/2021/8390/0/839000a165", "title": "Microservice-based Edge Device Architecture for Video Analytics", "doi": null, "abstractUrl": "/proceedings-article/sec/2021/839000a165/1B2H9sgX0HK", "parentPublication": { "id": "proceedings/sec/2021/8390/0", "title": "2021 IEEE/ACM Symposium on Edge Computing (SEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sesos/2022/9334/0/933400a033", "title": "MicroGraphQL: a Unified Communication Approach for Systems of Systems using Microservices and GraphQL", "doi": null, "abstractUrl": "/proceedings-article/sesos/2022/933400a033/1ED1X8cE5lm", "parentPublication": { "id": "proceedings/sesos/2022/9334/0", "title": "2022 IEEE/ACM 10th International Workshop on Software Engineering for Systems-of-Systems and Software Ecosystems (SESoS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloud/2022/8137/0/813700a241", "title": "Data Access Pattern Recommendations for Microservices Architecture", "doi": null, "abstractUrl": "/proceedings-article/cloud/2022/813700a241/1G6jXIXniNy", "parentPublication": { "id": "proceedings/cloud/2022/8137/0", "title": "2022 IEEE 15th International Conference on Cloud Computing (CLOUD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acsos/2022/7137/0/713700a091", "title": "BLOC: Balancing Load with Overload Control In the Microservices Architecture", "doi": null, "abstractUrl": "/proceedings-article/acsos/2022/713700a091/1I1P0mBkIRa", "parentPublication": { "id": "proceedings/acsos/2022/7137/0", "title": "2022 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ucc/2022/6087/0/608700a061", "title": "Polaris Scheduler: SLO- and Topology-aware Microservices Scheduling at the Edge", "doi": null, "abstractUrl": "/proceedings-article/ucc/2022/608700a061/1LvAcMXqEXS", "parentPublication": { "id": "proceedings/ucc/2022/6087/0", "title": "2022 IEEE/ACM 15th International Conference on Utility and Cloud Computing (UCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edoc/2021/3579/0/357900a134", "title": "Microservices Orchestration vs. Choreography: A Decision Framework", "doi": null, "abstractUrl": "/proceedings-article/edoc/2021/357900a134/1yZ5oJqixHy", "parentPublication": { "id": "proceedings/edoc/2021/3579/0", "title": "2021 IEEE 25th International Enterprise Distributed Object Computing Conference (EDOC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2021/2744/0/09631479", "title": "A Microservices and Blockchain Based One Time Password (MBB-OTP) Protocol for Security-Enhanced Authentication", "doi": null, "abstractUrl": "/proceedings-article/iscc/2021/09631479/1zmvMYhwBdS", "parentPublication": { "id": "proceedings/iscc/2021/2744/0", "title": "2021 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1JZ58zpsBDG", "title": "2022 48th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)", "acronym": "seaa", "groupId": "10011188", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JZ5iwD4ypq", "doi": "10.1109/SEAA56994.2022.00013", "title": "WALTS: Walmart AutoML Libraries, Tools and Services", "normalizedTitle": "WALTS: Walmart AutoML Libraries, Tools and Services", "abstract": "Automated Machine Learning (AutoML) is an upcoming field in machine learning (ML) that searches the candidate model space for a given task, dataset and an evaluation metric and returns the best performing model on the supplied dataset as per the given metric. AutoML not only reduces the man-power and expertise needed to develop ML models but also decreases the time-to-market for ML models substantially. In Walmart, we have designed an enterprise-scale AutoML frame-work called WALTS to meet the rising demand of employing ML in the retail business, and thus help democratize ML within our organization. In this work, we delve into the design of WALTS from both algorithmic and architectural perspectives. Specfiically, we elaborate on how we explore models from a pool of candidates along with describing our choice of technology stack to make the whole process scalable and robust. We illustrate the process with the help of a business use-case, and finally underline how WALTS has impacted our business so far.", "abstracts": [ { "abstractType": "Regular", "content": "Automated Machine Learning (AutoML) is an upcoming field in machine learning (ML) that searches the candidate model space for a given task, dataset and an evaluation metric and returns the best performing model on the supplied dataset as per the given metric. AutoML not only reduces the man-power and expertise needed to develop ML models but also decreases the time-to-market for ML models substantially. 
In Walmart, we have designed an enterprise-scale AutoML frame-work called WALTS to meet the rising demand of employing ML in the retail business, and thus help democratize ML within our organization. In this work, we delve into the design of WALTS from both algorithmic and architectural perspectives. Specfiically, we elaborate on how we explore models from a pool of candidates along with describing our choice of technology stack to make the whole process scalable and robust. We illustrate the process with the help of a business use-case, and finally underline how WALTS has impacted our business so far.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Automated Machine Learning (AutoML) is an upcoming field in machine learning (ML) that searches the candidate model space for a given task, dataset and an evaluation metric and returns the best performing model on the supplied dataset as per the given metric. AutoML not only reduces the man-power and expertise needed to develop ML models but also decreases the time-to-market for ML models substantially. In Walmart, we have designed an enterprise-scale AutoML frame-work called WALTS to meet the rising demand of employing ML in the retail business, and thus help democratize ML within our organization. In this work, we delve into the design of WALTS from both algorithmic and architectural perspectives. Specfiically, we elaborate on how we explore models from a pool of candidates along with describing our choice of technology stack to make the whole process scalable and robust. 
We illustrate the process with the help of a business use-case, and finally underline how WALTS has impacted our business so far.", "fno": "615200a021", "keywords": [ "Learning Artificial Intelligence", "Retail Data Processing", "Automated Machine Learning", "Candidate Model Space", "Enterprise Scale Auto ML Frame Work", "Retail Business", "Walmart Auto ML Libraries", "WALTS", "Measurement", "Bridges", "Training", "Wheels", "Prototypes", "Machine Learning", "Organizations", "Auto ML", "Machine Learning", "Classification", "Natural Language Processing" ], "authors": [ { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Rahul Bajaj", "givenName": "Rahul", "surname": "Bajaj", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Kunal Banerjee", "givenName": "Kunal", "surname": "Banerjee", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Lalitdutt Parsai", "givenName": "Lalitdutt", "surname": "Parsai", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Deepansh Goyal", "givenName": "Deepansh", "surname": "Goyal", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Sachin Parmar", "givenName": "Sachin", "surname": "Parmar", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Divyajyothi Bn", "givenName": "Divyajyothi", "surname": "Bn", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Balamurugan Subramaniam", "givenName": "Balamurugan", "surname": "Subramaniam", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Chaitanya Sai", "givenName": "Chaitanya", "surname": "Sai", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": 
"Tarun Balotia", "givenName": "Tarun", "surname": "Balotia", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Anirban Chatterjee", "givenName": "Anirban", "surname": "Chatterjee", "__typename": "ArticleAuthorType" }, { "affiliation": "Walmart Global Tech,Bangalore,India", "fullName": "Kailash Sati", "givenName": "Kailash", "surname": "Sati", "__typename": "ArticleAuthorType" } ], "idPrefix": "seaa", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "21-28", "year": "2022", "issn": null, "isbn": "978-1-6654-6152-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "615200a013", "articleId": "1JZ59QR9PNe", "__typename": "AdjacentArticleType" }, "next": { "fno": "615200a029", "articleId": "1JZ5gkRZcmk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdmw/2021/2427/0/242700a238", "title": "Stochastic Schemata Exploiter-Based AutoML", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2021/242700a238/1AjSENasAfK", "parentPublication": { "id": "proceedings/icdmw/2021/2427/0", "title": "2021 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ex/2022/05/09927206", "title": "Beyond AutoML: Mindful and Actionable AI and AutoAI With Mind and Action", "doi": null, "abstractUrl": "/magazine/ex/2022/05/09927206/1HGJtMK50LC", "parentPublication": { "id": "mags/ex", "title": "IEEE Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsme/2022/7956/0/795600a059", "title": "An Empirical Study on the Usage of Automated Machine Learning Tools", "doi": null, "abstractUrl": 
"/proceedings-article/icsme/2022/795600a059/1JeFkmlWKFq", "parentPublication": { "id": "proceedings/icsme/2022/7956/0", "title": "2022 IEEE International Conference on Software Maintenance and Evolution (ICSME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09005963", "title": "Modeling and Forecasting Armed Conflict: AutoML with Human-Guided Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09005963/1hJs1JWYiOY", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2019/3798/0/379800b471", "title": "Towards Automated Machine Learning: Evaluation and Comparison of AutoML Approaches and Tools", "doi": null, "abstractUrl": "/proceedings-article/ictai/2019/379800b471/1hrLRPyQ8co", "parentPublication": { "id": "proceedings/ictai/2019/3798/0", "title": "2019 IEEE 31st International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222086", "title": "<italic>PipelineProfiler:</italic> A Visual Analytics Tool for the Exploration of AutoML Pipelines", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222086/1nTrpup4LZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/09/09321731", "title": "AutoML for Multi-Label Classification: Overview and Empirical Evaluation", "doi": null, "abstractUrl": "/journal/tp/2021/09/09321731/1qmbhpPOIp2", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icse-companion/2021/1219/0/121900a287", "title": "Data Analytics and Machine Learning Methods, Techniques and Tool for Model-Driven Engineering of Smart IoT Services", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2021/121900a287/1sET7EMLB2E", "parentPublication": { "id": "proceedings/icse-companion/2021/1219/0/", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcoss/2021/3929/0/392900a451", "title": "Network-Aware AutoML Framework for Software-Defined Sensor Networks", "doi": null, "abstractUrl": "/proceedings-article/dcoss/2021/392900a451/1yBFouKygSs", "parentPublication": { "id": "proceedings/dcoss/2021/3929/0", "title": "2021 17th International Conference on Distributed Computing in Sensor Systems (DCOSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2021/0898/0/089800a412", "title": "AutoTSC: Optimization Algorithm to Automatically Solve the Time Series Classification Problem", "doi": null, "abstractUrl": "/proceedings-article/ictai/2021/089800a412/1zw61xpot2w", "parentPublication": { "id": "proceedings/ictai/2021/0898/0", "title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cTI8geedm8", "title": "2019 IEEE World Congress on Services (SERVICES)", "acronym": "services", "groupId": "1800492", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cTIbYtbuIE", "doi": "10.1109/SERVICES.2019.00093", "title": "MLModelScope: Evaluate and Introspect Cognitive Pipelines", "normalizedTitle": "MLModelScope: Evaluate and Introspect Cognitive Pipelines", "abstract": "The current landscape of cognitive pipelines exercises many Machine Learning (ML) and Deep Learning (DL) building blocks. These ML and DL building blocks leverage non-uniform frameworks, models, and system stacks. Currently, there is no end-to-end tool that facilitates ML and DL building blocks evaluation and introspection within cognitive pipelines. Due to the absence of such tools, the current practice for evaluating and comparing the benefits of hardware or software innovations on end-to-end cognitive pipelines is both arduous and error-prone - stifling the rate of adoption of innovations. We propose MLModelScope: a hardware/software agnostic platform to facilitate evaluation and introspection of cognitive pipelines in the cloud or on the edge. We describe the design and implementation of MLModelScope and show how it provides a holistic view of the execution of components within cognitive pipelines. MLModelScope aids application developers in experimenting with and discovering cognitive models, data scientists in comparing and evaluating published algorithms, and system architects in optimizing system stacks for cognitive applications.", "abstracts": [ { "abstractType": "Regular", "content": "The current landscape of cognitive pipelines exercises many Machine Learning (ML) and Deep Learning (DL) building blocks. These ML and DL building blocks leverage non-uniform frameworks, models, and system stacks. 
Currently, there is no end-to-end tool that facilitates ML and DL building blocks evaluation and introspection within cognitive pipelines. Due to the absence of such tools, the current practice for evaluating and comparing the benefits of hardware or software innovations on end-to-end cognitive pipelines is both arduous and error-prone - stifling the rate of adoption of innovations. We propose MLModelScope: a hardware/software agnostic platform to facilitate evaluation and introspection of cognitive pipelines in the cloud or on the edge. We describe the design and implementation of MLModelScope and show how it provides a holistic view of the execution of components within cognitive pipelines. MLModelScope aids application developers in experimenting with and discovering cognitive models, data scientists in comparing and evaluating published algorithms, and system architects in optimizing system stacks for cognitive applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The current landscape of cognitive pipelines exercises many Machine Learning (ML) and Deep Learning (DL) building blocks. These ML and DL building blocks leverage non-uniform frameworks, models, and system stacks. Currently, there is no end-to-end tool that facilitates ML and DL building blocks evaluation and introspection within cognitive pipelines. Due to the absence of such tools, the current practice for evaluating and comparing the benefits of hardware or software innovations on end-to-end cognitive pipelines is both arduous and error-prone - stifling the rate of adoption of innovations. We propose MLModelScope: a hardware/software agnostic platform to facilitate evaluation and introspection of cognitive pipelines in the cloud or on the edge. We describe the design and implementation of MLModelScope and show how it provides a holistic view of the execution of components within cognitive pipelines. 
MLModelScope aids application developers in experimenting with and discovering cognitive models, data scientists in comparing and evaluating published algorithms, and system architects in optimizing system stacks for cognitive applications.", "fno": "385100a335", "keywords": [ "Cloud Computing", "Learning Artificial Intelligence", "Pipeline Processing", "Program Diagnostics", "Software Performance Evaluation", "ML Model Scope", "Building Blocks Evaluation", "Deep Learning Building Blocks", "Machine Learning Building Blocks", "Building Blocks Introspection", "Hardware Software Agnostic Platform", "Performance Profiling", "End To End Cognitive Application Pipeline", "Software Profiler", "Hardware Profiler", "Pipelines", "Hardware", "Tools", "Libraries", "Graphics Processing Units", "Data Models", "Machine Learning", "Deep Learning", "Performance Profiling", "AI Software" ], "authors": [ { "affiliation": "University of Illinois Urbana-Champaign", "fullName": "Cheng Li", "givenName": "Cheng", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Illinois Urbana-Champaign", "fullName": "Abdul Dakkak", "givenName": "Abdul", "surname": "Dakkak", "__typename": "ArticleAuthorType" }, { "affiliation": "IBM T.J. 
Watson Research Center", "fullName": "Jinjun Xiong", "givenName": "Jinjun", "surname": "Xiong", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Illinois Urbana-Champaign", "fullName": "Wen-mei Hwu", "givenName": "Wen-mei", "surname": "Hwu", "__typename": "ArticleAuthorType" } ], "idPrefix": "services", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-07-01T00:00:00", "pubType": "proceedings", "pages": "335-338", "year": "2019", "issn": null, "isbn": "978-1-7281-3851-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "385100a015", "articleId": "1cTIeMb7TWw", "__typename": "AdjacentArticleType" }, "next": { "fno": "385100a021", "articleId": "1cTIav2VCik", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rcose/2018/5745/0/574501a027", "title": "Exploiting DevOps Practices for Dependable and Secure Continuous Delivery Pipelines", "doi": null, "abstractUrl": "/proceedings-article/rcose/2018/574501a027/13bd1tl2olI", "parentPublication": { "id": "proceedings/rcose/2018/5745/0", "title": "2018 IEEE/ACM 4th International Workshop on Rapid Continuous Software Engineering (RCoSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2018/8384/0/838400a807", "title": "Exploring Flexible Communications for Streamlining DNN Ensemble Training Pipelines", "doi": null, "abstractUrl": "/proceedings-article/sc/2018/838400a807/17D45XERmlD", "parentPublication": { "id": "proceedings/sc/2018/8384/0", "title": "2018 SC18: The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apsec/2021/3784/0/378400a463", "title": "On the Impact of ML use cases on Industrial Data 
Pipelines", "doi": null, "abstractUrl": "/proceedings-article/apsec/2021/378400a463/1B4marpusIE", "parentPublication": { "id": "proceedings/apsec/2021/3784/0", "title": "2021 28th Asia-Pacific Software Engineering Conference (APSEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2022/9747/0/974700b040", "title": "ScaDL 2022 Invited Talk 2: AI/ML Pipelines using CodeFlare", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2022/974700b040/1Fu9tldpQNG", "parentPublication": { "id": "proceedings/ipdpsw/2022/9747/0", "title": "2022 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2020/06/09046248", "title": "Design and Implementation of Reconfigurable Asynchronous Pipelines", "doi": null, "abstractUrl": "/journal/si/2020/06/09046248/1isuta8TrMI", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2020/6876/0/09139875", "title": "XSP: Across-Stack Profiling and Analysis of Machine Learning Models on GPUs", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2020/09139875/1lsrZoQ4wM0", "parentPublication": { "id": "proceedings/ipdps/2020/6876/0", "title": "2020 IEEE International Parallel and Distributed Processing Symposium (IPDPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccgrid/2020/6095/0/09139677", "title": "Performance Analysis of Distributed and Scalable Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2020/09139677/1lssuNVORs4", "parentPublication": { "id": "proceedings/ccgrid/2020/6095/0", "title": "2020 20th IEEE/ACM International Symposium on Cluster, Cloud and Internet Computing (CCGRID)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222086", "title": "<italic>PipelineProfiler:</italic> A Visual Analytics Tool for the Exploration of AutoML Pipelines", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222086/1nTrpup4LZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaice/2020/9146/0/914600a494", "title": "Towards MLOps: A Case Study of ML Pipeline Platform", "doi": null, "abstractUrl": "/proceedings-article/icaice/2020/914600a494/1rCgan1pDPy", "parentPublication": { "id": "proceedings/icaice/2020/9146/0", "title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/saner/2021/9630/0/963000a422", "title": "On the Co-evolution of ML Pipelines and Source Code - Empirical Study of DVC Projects", "doi": null, "abstractUrl": "/proceedings-article/saner/2021/963000a422/1twftdvmG88", "parentPublication": { "id": "proceedings/saner/2021/9630/0", "title": "2021 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hJrHq07uw0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "acronym": "big-data", "groupId": "1802964", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hJs1JWYiOY", "doi": "10.1109/BigData47090.2019.9005963", "title": "Modeling and Forecasting Armed Conflict: AutoML with Human-Guided Machine Learning", "normalizedTitle": "Modeling and Forecasting Armed Conflict: AutoML with Human-Guided Machine Learning", "abstract": "Machine learning has made slow inroads into quantitative social science due to both a mismatch of machine learning's strengths to the causal and inferential tasks domain researchers pursue [1] and also a lack of algorithmic training among many domain experts [2]. However, conflict research- the empirical examination of political unrest, violence and civil war-has seen a growing emphasis on prediction and forecasting models. We describe automated machine learning (AutoML) to identify models, and human-guided machine learning (HGML), and show how these can incorporate domain knowledge and research requirements into model selection and assessment, and provide high quality machine learning pipelines to domain experts comparable to state-of-the-literature solutions. We examine three peer-reviewed papers with predictive models of conflict [3, 4, 5] and run their data through our HGML system using multiple AutoML engines and find this system produces slightly elevated performance on each paper's model, without any ML expertise required of the user. Our research has three takeaways for computational social science. 
First, predictive models of conflict would benefit from even minimal applications of AutoML; Secondly, human-guided machine learning offers the attractive option of constraining AutoML systems to address the kinds of questions conflict researchers assess with predictive models; Finally, current existing AutoML implementations produce divergent solutions and so can be productively harnessed in parallel.", "abstracts": [ { "abstractType": "Regular", "content": "Machine learning has made slow inroads into quantitative social science due to both a mismatch of machine learning's strengths to the causal and inferential tasks domain researchers pursue [1] and also a lack of algorithmic training among many domain experts [2]. However, conflict research- the empirical examination of political unrest, violence and civil war-has seen a growing emphasis on prediction and forecasting models. We describe automated machine learning (AutoML) to identify models, and human-guided machine learning (HGML), and show how these can incorporate domain knowledge and research requirements into model selection and assessment, and provide high quality machine learning pipelines to domain experts comparable to state-of-the-literature solutions. We examine three peer-reviewed papers with predictive models of conflict [3, 4, 5] and run their data through our HGML system using multiple AutoML engines and find this system produces slightly elevated performance on each paper's model, without any ML expertise required of the user. Our research has three takeaways for computational social science. 
First, predictive models of conflict would benefit from even minimal applications of AutoML; Secondly, human-guided machine learning offers the attractive option of constraining AutoML systems to address the kinds of questions conflict researchers assess with predictive models; Finally, current existing AutoML implementations produce divergent solutions and so can be productively harnessed in parallel.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Machine learning has made slow inroads into quantitative social science due to both a mismatch of machine learning's strengths to the causal and inferential tasks domain researchers pursue [1] and also a lack of algorithmic training among many domain experts [2]. However, conflict research- the empirical examination of political unrest, violence and civil war-has seen a growing emphasis on prediction and forecasting models. We describe automated machine learning (AutoML) to identify models, and human-guided machine learning (HGML), and show how these can incorporate domain knowledge and research requirements into model selection and assessment, and provide high quality machine learning pipelines to domain experts comparable to state-of-the-literature solutions. We examine three peer-reviewed papers with predictive models of conflict [3, 4, 5] and run their data through our HGML system using multiple AutoML engines and find this system produces slightly elevated performance on each paper's model, without any ML expertise required of the user. Our research has three takeaways for computational social science. 
First, predictive models of conflict would benefit from even minimal applications of AutoML; Secondly, human-guided machine learning offers the attractive option of constraining AutoML systems to address the kinds of questions conflict researchers assess with predictive models; Finally, current existing AutoML implementations produce divergent solutions and so can be productively harnessed in parallel.", "fno": "09005963", "keywords": [ "Learning Artificial Intelligence", "Social Sciences", "Auto ML", "Human Guided Machine Learning", "Causal Tasks", "Quantitative Social Science", "Inferential Tasks", "Automated Machine Learning", "Predictive Models", "Machine Learning", "Data Models", "Forecasting", "Task Analysis", "Testing", "Logistics", "Human Guided Machine Learning", "Automated Machine Learning Auto ML", "Conflict Forecasting" ], "authors": [ { "affiliation": "University of Texas at Dallas,Department of Political Science,Dallas,TX", "fullName": "Vito D’Orazio", "givenName": "Vito", "surname": "D’Orazio", "__typename": "ArticleAuthorType" }, { "affiliation": "Harvard John A. Paulson School of Engineering and Applied Sciences,Center for Research on Computation and Society,Cambridge,MA", "fullName": "James Honaker", "givenName": "James", "surname": "Honaker", "__typename": "ArticleAuthorType" }, { "affiliation": "Harvard John A. Paulson School of Engineering and Applied Sciences,Center for Research on Computation and Society,Cambridge,MA", "fullName": "Raman Prasady", "givenName": "Raman", "surname": "Prasady", "__typename": "ArticleAuthorType" }, { "affiliation": "Harvard John A. 
Paulson School of Engineering and Applied Sciences,Center for Research on Computation and Society,Cambridge,MA", "fullName": "Michael Shoemate", "givenName": "Michael", "surname": "Shoemate", "__typename": "ArticleAuthorType" } ], "idPrefix": "big-data", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-12-01T00:00:00", "pubType": "proceedings", "pages": "4714-4723", "year": "2019", "issn": null, "isbn": "978-1-7281-0858-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09005669", "articleId": "1hJrTr0D9Sw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09005534", "articleId": "1hJstJWyeYg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aiaws/1991/2240/0/00236566", "title": "A new perspective on conflict resolution in market forecasting", "doi": null, "abstractUrl": "/proceedings-article/aiaws/1991/00236566/12OmNzlUKs5", "parentPublication": { "id": "proceedings/aiaws/1991/2240/0", "title": "Proceedings First International Conference on Artificial Intelligence Applications on Wall Street", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2022/9221/0/922100b932", "title": "SAPIENTML: Synthesizing Machine Learning Pipelines by Learning from Human-Written Solutions", "doi": null, "abstractUrl": "/proceedings-article/icse/2022/922100b932/1Emsk8cWwaA", "parentPublication": { "id": "proceedings/icse/2022/9221/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seaa/2022/6152/0/615200a021", "title": "WALTS: Walmart AutoML Libraries, Tools and Services", "doi": null, "abstractUrl": "/proceedings-article/seaa/2022/615200a021/1JZ5iwD4ypq", "parentPublication": 
{ "id": "proceedings/seaa/2022/6152/0", "title": "2022 48th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2023/8263/0/826300a216", "title": "A Proactive and Generalizable Conflict Prediction Model", "doi": null, "abstractUrl": "/proceedings-article/icsc/2023/826300a216/1LFKRRk80i4", "parentPublication": { "id": "proceedings/icsc/2023/8263/0", "title": "2023 IEEE 17th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/synasc/2018/0625/0/062500a260", "title": "Toward on-Line Predictive Models for Forecasting Workload in Clouds", "doi": null, "abstractUrl": "/proceedings-article/synasc/2018/062500a260/1bhJwSyef5u", "parentPublication": { "id": "proceedings/synasc/2018/0625/0", "title": "2018 20th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2019/4896/0/489600a777", "title": "Automated Machine Learning Techniques in Prognostics of Railway Track Defects", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2019/489600a777/1gAx1hMxlp6", "parentPublication": { "id": "proceedings/icdmw/2019/4896/0", "title": "2019 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300i994", "title": "Transferable AutoML by Model Sharing Over Grouped Datasets", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300i994/1gyrUJHqu3e", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, 
{ "id": "trans/tg/2021/02/09222086", "title": "<italic>PipelineProfiler:</italic> A Visual Analytics Tool for the Exploration of AutoML Pipelines", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222086/1nTrpup4LZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/09/09347828", "title": "Predicting Machine Learning Pipeline Runtimes in the Context of Automated Machine Learning", "doi": null, "abstractUrl": "/journal/tp/2021/09/09347828/1qWIbgHm4da", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigdataservice/2021/3483/0/348300a017", "title": "BOAT: A Bayesian Optimization AutoML Time-series Framework for Industrial Applications", "doi": null, "abstractUrl": "/proceedings-article/bigdataservice/2021/348300a017/1xNNnuFu6UU", "parentPublication": { "id": "proceedings/bigdataservice/2021/3483/0", "title": "2021 IEEE Seventh International Conference on Big Data Computing Service and Applications (BigDataService)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hrLN3V2XKg", "title": "2019 IEEE 31st International Conference on Tools with Artificial Intelligence (ICTAI)", "acronym": "ictai", "groupId": "1000763", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hrLRPyQ8co", "doi": "10.1109/ICTAI.2019.00209", "title": "Towards Automated Machine Learning: Evaluation and Comparison of AutoML Approaches and Tools", "normalizedTitle": "Towards Automated Machine Learning: Evaluation and Comparison of AutoML Approaches and Tools", "abstract": "There has been considerable growth and interest in industrial applications of machine learning (ML) in recent years. ML engineers, as a consequence, are in high demand across the industry, yet improving the efficiency of ML engineers remains a fundamental challenge. Automated machine learning (AutoML) has emerged as a way to save time and effort on repetitive tasks in ML pipelines, such as data pre-processing, feature engineering, model selection, hyperparameter optimization, and prediction result analysis. In this paper, we investigate the current state of AutoML tools aiming to automate these tasks. We conduct various evaluations of the tools on many datasets, in different data segments, to examine their performance, and compare their advantages and disadvantages on different test cases.", "abstracts": [ { "abstractType": "Regular", "content": "There has been considerable growth and interest in industrial applications of machine learning (ML) in recent years. ML engineers, as a consequence, are in high demand across the industry, yet improving the efficiency of ML engineers remains a fundamental challenge. Automated machine learning (AutoML) has emerged as a way to save time and effort on repetitive tasks in ML pipelines, such as data pre-processing, feature engineering, model selection, hyperparameter optimization, and prediction result analysis. 
In this paper, we investigate the current state of AutoML tools aiming to automate these tasks. We conduct various evaluations of the tools on many datasets, in different data segments, to examine their performance, and compare their advantages and disadvantages on different test cases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "There has been considerable growth and interest in industrial applications of machine learning (ML) in recent years. ML engineers, as a consequence, are in high demand across the industry, yet improving the efficiency of ML engineers remains a fundamental challenge. Automated machine learning (AutoML) has emerged as a way to save time and effort on repetitive tasks in ML pipelines, such as data pre-processing, feature engineering, model selection, hyperparameter optimization, and prediction result analysis. In this paper, we investigate the current state of AutoML tools aiming to automate these tasks. We conduct various evaluations of the tools on many datasets, in different data segments, to examine their performance, and compare their advantages and disadvantages on different test cases.", "fno": "379800b471", "keywords": [ "Learning Artificial Intelligence", "Automated Machine Learning", "Industrial Applications", "ML Engineers", "ML Pipelines", "Data Preprocessing", "Feature Engineering", "Auto ML Tools", "Auto ML Automated Machine Learning Driverless AI Model Selection Hyperparameter Optimization" ], "authors": [ { "affiliation": "Capital One, USA", "fullName": "Anh Truong", "givenName": "Anh", "surname": "Truong", "__typename": "ArticleAuthorType" }, { "affiliation": "Capital One, USA", "fullName": "Austin Walters", "givenName": "Austin", "surname": "Walters", "__typename": "ArticleAuthorType" }, { "affiliation": "Capital One, USA", "fullName": "Jeremy Goodsitt", "givenName": "Jeremy", "surname": "Goodsitt", "__typename": "ArticleAuthorType" }, { "affiliation": "Capital One, USA", "fullName": "Keegan Hines", 
"givenName": "Keegan", "surname": "Hines", "__typename": "ArticleAuthorType" }, { "affiliation": "Capital One, USA", "fullName": "C. Bayan Bruss", "givenName": "C. Bayan", "surname": "Bruss", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Illinois at Urbana-Champaign, USA", "fullName": "Reza Farivar", "givenName": "Reza", "surname": "Farivar", "__typename": "ArticleAuthorType" } ], "idPrefix": "ictai", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-11-01T00:00:00", "pubType": "proceedings", "pages": "1471-1479", "year": "2019", "issn": null, "isbn": "978-1-7281-3798-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "379800b463", "articleId": "1hrLQ2a8ngA", "__typename": "AdjacentArticleType" }, "next": { "fno": "379800b480", "articleId": "1hrLZVoe6C4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/bracis/2018/8023/0/802300a121", "title": "Bandit-Based Automated Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/bracis/2018/802300a121/17D45VTRoBx", "parentPublication": { "id": "proceedings/bracis/2018/8023/0", "title": "2018 7th Brazilian Conference on Intelligent Systems (BRACIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seaa/2022/6152/0/615200a021", "title": "WALTS: Walmart AutoML Libraries, Tools and Services", "doi": null, "abstractUrl": "/proceedings-article/seaa/2022/615200a021/1JZ5iwD4ypq", "parentPublication": { "id": "proceedings/seaa/2022/6152/0", "title": "2022 48th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsme/2022/7956/0/795600a059", "title": "An Empirical Study on the Usage of Automated Machine 
Learning Tools", "doi": null, "abstractUrl": "/proceedings-article/icsme/2022/795600a059/1JeFkmlWKFq", "parentPublication": { "id": "proceedings/icsme/2022/7956/0", "title": "2022 IEEE International Conference on Software Maintenance and Evolution (ICSME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2022/4609/0/460900a047", "title": "cSmartML-Glassbox: Increasing Transparency and Controllability in Automated Clustering", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2022/460900a047/1KBqR5zoGu4", "parentPublication": { "id": "proceedings/icdmw/2022/4609/0", "title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09005963", "title": "Modeling and Forecasting Armed Conflict: AutoML with Human-Guided Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09005963/1hJs1JWYiOY", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b812", "title": "AutoDispNet: Improving Disparity Estimation With AutoML", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b812/1hVlIoubirC", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222086", "title": "<italic>PipelineProfiler:</italic> A Visual Analytics Tool for the Exploration of AutoML Pipelines", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222086/1nTrpup4LZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600b220", "title": "AutoClust: A Framework for Automated Clustering Based on Cluster Validity Indices", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600b220/1r54G6iNcqI", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigdataservice/2021/3483/0/348300a017", "title": "BOAT: A Bayesian Optimization AutoML Time-series Framework for Industrial Applications", "doi": null, "abstractUrl": "/proceedings-article/bigdataservice/2021/348300a017/1xNNnuFu6UU", "parentPublication": { "id": "proceedings/bigdataservice/2021/3483/0", "title": "2021 IEEE Seventh International Conference on Big Data Computing Service and Applications (BigDataService)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcoss/2021/3929/0/392900a451", "title": "Network-Aware AutoML Framework for Software-Defined Sensor Networks", "doi": null, "abstractUrl": "/proceedings-article/dcoss/2021/392900a451/1yBFouKygSs", "parentPublication": { "id": "proceedings/dcoss/2021/3929/0", "title": "2021 17th International Conference on Distributed Computing in Sensor Systems (DCOSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCwUmAj", "title": "2008 Second International Symposium on Universal Communication", "acronym": "isuc", "groupId": "1002556", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNAPSMkF", "doi": "10.1109/ISUC.2008.39", "title": "Inferring User Interests from Relevance Feedback with High Similarity Sequence Data-Driven Clustering", "normalizedTitle": "Inferring User Interests from Relevance Feedback with High Similarity Sequence Data-Driven Clustering", "abstract": "Relevance feedback is an important source of information about a user and often used for usage and user modeling for further personalization of user-system interactions. In this paper we present a method to infer the user’s interests from his/her relevance feedback using an online incremental clustering method. For inference of a new interest (concept) and concept update the method uses the similarity characteristics of uniform user relevance feedback. It is fast, easy to implement and gives reasonable clustering results. We evaluate the method against two different data sets, demonstrate and discuss the outcomes.", "abstracts": [ { "abstractType": "Regular", "content": "Relevance feedback is an important source of information about a user and often used for usage and user modeling for further personalization of user-system interactions. In this paper we present a method to infer the user’s interests from his/her relevance feedback using an online incremental clustering method. For inference of a new interest (concept) and concept update the method uses the similarity characteristics of uniform user relevance feedback. It is fast, easy to implement and gives reasonable clustering results. 
We evaluate the method against two different data sets, demonstrate and discuss the outcomes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Relevance feedback is an important source of information about a user and often used for usage and user modeling for further personalization of user-system interactions. In this paper we present a method to infer the user’s interests from his/her relevance feedback using an online incremental clustering method. For inference of a new interest (concept) and concept update the method uses the similarity characteristics of uniform user relevance feedback. It is fast, easy to implement and gives reasonable clustering results. We evaluate the method against two different data sets, demonstrate and discuss the outcomes.", "fno": "3433a390", "keywords": [ "User Interests", "Incremental Clustering", "Relevance Feedback" ], "authors": [ { "affiliation": null, "fullName": "Roman Y. Shtykh", "givenName": "Roman Y.", "surname": "Shtykh", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qun Jin", "givenName": "Qun", "surname": "Jin", "__typename": "ArticleAuthorType" } ], "idPrefix": "isuc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "390-396", "year": "2008", "issn": null, "isbn": "978-0-7695-3433-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3433a382", "articleId": "12OmNy2rRXi", "__typename": "AdjacentArticleType" }, "next": { "fno": "3433a397", "articleId": "12OmNqNXEnU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ssme/2009/3729/0/3729a587", "title": "Relevance Feedback on Keyword Space for Interactive Information Retrieval", "doi": null, "abstractUrl": "/proceedings-article/ssme/2009/3729a587/12OmNB0nWce", 
"parentPublication": { "id": "proceedings/ssme/2009/3729/0", "title": "2009 IITA International Conference on Services Science, Management and Engineering (SSME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a272", "title": "Relative Relevance Feedback in Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a272/12OmNBUS73n", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2010/4217/0/4217a342", "title": "A Lazy Processing Approach to User Relevance Feedback for Content-Based Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/ism/2010/4217a342/12OmNro0I9F", "parentPublication": { "id": "proceedings/ism/2010/4217/0", "title": "2010 IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2012/4880/3/4880c122", "title": "Relevance Feedback Fusion via Query Expansion", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2012/4880c122/12OmNvBrgGQ", "parentPublication": { "id": "proceedings/wi-iat/2012/4880/1", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsct/2008/3498/2/3498b198", "title": "Partial Relevance Feedback for 3D Model Retrieval", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498b198/12OmNx6xHnV", "parentPublication": { "id": "proceedings/iscsct/2008/3498/1", "title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/alpit/2007/2930/0/2930a559", "title": "Reinforcement Learning Using Negative Relevance Feedback", "doi": null, "abstractUrl": "/proceedings-article/alpit/2007/2930a559/12OmNyQ7G9Z", "parentPublication": { "id": "proceedings/alpit/2007/2930/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2011/1189/0/05999140", "title": "Improving content-based retrieval of medical images through dynamic distance on relevance feedback", "doi": null, "abstractUrl": "/proceedings-article/cbms/2011/05999140/12OmNzTppHL", "parentPublication": { "id": "proceedings/cbms/2011/1189/0", "title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2008/3358/0/3358a155", "title": "A Genetic Programming Approach for Relevance Feedback in Region-Based Image Retrieval Systems", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2008/3358a155/12OmNzZEAsj", "parentPublication": { "id": "proceedings/sibgrapi/2008/3358/0", "title": "2008 XXI Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kam/2009/3888/2/3888b361", "title": "Non-relevance Feedback for Document Retrieval", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888b361/12OmNzvhvvm", "parentPublication": { "id": "proceedings/kam/2009/3888/2", "title": "Knowledge Acquisition and Modeling, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2006/04/k0509", "title": "A Unified Log-Based Relevance Feedback Scheme for Image Retrieval", "doi": null, "abstractUrl": "/journal/tk/2006/04/k0509/13rRUwInvBp", "parentPublication": { "id": 
"trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBTawna", "title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)", "acronym": "cse", "groupId": "1002115", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNyO8tMr", "doi": "10.1109/CSE.2014.122", "title": "A Bayesian Nonparametric Topic Model for User Interest Modeling", "normalizedTitle": "A Bayesian Nonparametric Topic Model for User Interest Modeling", "abstract": "Web users display their preferences implicitly by a sequence of pages they navigated. Web recommendation systems use methods to extract useful knowledge about user interests from such data. We propose a Bayesian nonparametric approach to the problem of modeling user interests in recommender systems using implicit feedback like user navigations and clicks on items. Our approach is based on the discovery of a set of latent interests that are shared among users in the system and make a key assumption that each user activity is motivated only by several interests amongst user interest profile which is quite different from most of the existing recommendation algorithms. By using a beta process and a Dirichlet prior, the number of hidden interests and the relationships between interests and items are both inferred from the data. In order to model the sequential information on user's visits, we make a Markovian assumption on each user's navigated item sequence. We develop a Markov chain Monte Carlo inference method based on the Indian buffet process representation of the beta process. We validate our sampling algorithm using synthetic data and real world datasets to demonstrate promising results on recovering the hidden user interests.", "abstracts": [ { "abstractType": "Regular", "content": "Web users display their preferences implicitly by a sequence of pages they navigated. 
Web recommendation systems use methods to extract useful knowledge about user interests from such data. We propose a Bayesian nonparametric approach to the problem of modeling user interests in recommender systems using implicit feedback like user navigations and clicks on items. Our approach is based on the discovery of a set of latent interests that are shared among users in the system and make a key assumption that each user activity is motivated only by several interests amongst user interest profile which is quite different from most of the existing recommendation algorithms. By using a beta process and a Dirichlet prior, the number of hidden interests and the relationships between interests and items are both inferred from the data. In order to model the sequential information on user's visits, we make a Markovian assumption on each user's navigated item sequence. We develop a Markov chain Monte Carlo inference method based on the Indian buffet process representation of the beta process. We validate our sampling algorithm using synthetic data and real world datasets to demonstrate promising results on recovering the hidden user interests.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Web users display their preferences implicitly by a sequence of pages they navigated. Web recommendation systems use methods to extract useful knowledge about user interests from such data. We propose a Bayesian nonparametric approach to the problem of modeling user interests in recommender systems using implicit feedback like user navigations and clicks on items. Our approach is based on the discovery of a set of latent interests that are shared among users in the system and make a key assumption that each user activity is motivated only by several interests amongst user interest profile which is quite different from most of the existing recommendation algorithms. 
By using a beta process and a Dirichlet prior, the number of hidden interests and the relationships between interests and items are both inferred from the data. In order to model the sequential information on user's visits, we make a Markovian assumption on each user's navigated item sequence. We develop a Markov chain Monte Carlo inference method based on the Indian buffet process representation of the beta process. We validate our sampling algorithm using synthetic data and real world datasets to demonstrate promising results on recovering the hidden user interests.", "fno": "7981a527", "keywords": [ "Hidden Markov Models", "Data Models", "Navigation", "Collaboration", "Filtering", "Bayes Methods", "Motion Pictures" ], "authors": [ { "affiliation": null, "fullName": "Qinjiao Mao", "givenName": "Qinjiao", "surname": "Mao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Boqin Feng", "givenName": "Boqin", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shanliang Pan", "givenName": "Shanliang", "surname": "Pan", "__typename": "ArticleAuthorType" } ], "idPrefix": "cse", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-12-01T00:00:00", "pubType": "proceedings", "pages": "527-534", "year": "2014", "issn": null, "isbn": "978-1-4799-7981-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7981a519", "articleId": "12OmNqBtiNl", "__typename": "AdjacentArticleType" }, "next": { "fno": "7981a535", "articleId": "12OmNxQOjGF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wi-iat/2013/5145/3/5145c001", "title": "Integrating Time Forgetting Mechanisms into Topic-Based User Interest Profiling", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2013/5145c001/12OmNCf1Dsn", "parentPublication": { 
"id": "proceedings/wi-iat/2013/5145/3", "title": "2013 IEEE/WIC/ACM International Joint Conferences on Web Intelligence (WI) and Intelligent Agent Technologies (IAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2014/4302/0/4302a845", "title": "Social Topic Modeling for Point-of-Interest Recommendation in Location-Based Social Networks", "doi": null, "abstractUrl": "/proceedings-article/icdm/2014/4302a845/12OmNCykm7E", "parentPublication": { "id": "proceedings/icdm/2014/4302/0", "title": "2014 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scc/2016/2628/0/2628a778", "title": "Latent Interest and Topic Mining on User-Item Bipartite Networks", "doi": null, "abstractUrl": "/proceedings-article/scc/2016/2628a778/12OmNqHqSwo", "parentPublication": { "id": "proceedings/scc/2016/2628/0", "title": "2016 IEEE International Conference on Services Computing (SCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2015/9618/1/9618a123", "title": "Topic Oriented User Influence Analysis in Social Networks", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2015/9618a123/12OmNx9WT0N", "parentPublication": { "id": "proceedings/wi-iat/2015/9618/1", "title": "2015 IEEE / WIC / ACM International Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2016/5473/0/07837897", "title": "A Bayesian Nonparametric Approach to Dynamic Dyadic Data Prediction", "doi": null, "abstractUrl": "/proceedings-article/icdm/2016/07837897/12OmNyKJin6", "parentPublication": { "id": "proceedings/icdm/2016/5473/0", "title": "2016 IEEE 16th International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/nas/2013/5034/0/5034a242", "title": "A New Interest-Sensitive and Network-Sensitive Method for User Recommendation", "doi": null, "abstractUrl": "/proceedings-article/nas/2013/5034a242/12OmNzFdt4z", "parentPublication": { "id": "proceedings/nas/2013/5034/0", "title": "2013 IEEE 8th International Conference on Networking, Architecture, and Storage (NAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2016/04/07305793", "title": "Domain-Sensitive Recommendation with User-Item Subgroup Analysis", "doi": null, "abstractUrl": "/journal/tk/2016/04/07305793/13rRUxBa56D", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2018/9159/0/08594893", "title": "Collapsed Variational Inference for Nonparametric Bayesian Group Factor Analysis", "doi": null, "abstractUrl": "/proceedings-article/icdm/2018/08594893/17D45WHONnn", "parentPublication": { "id": "proceedings/icdm/2018/9159/0", "title": "2018 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0/148500a698", "title": "Multi-interest Aware Recommendation in CrowdIntell Network", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2020/148500a698/1ua4BUPmf04", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0", "title": "2020 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2020/6406/0/640600b499", 
"title": "User Local and Global Interest Depth Interaction for Recommendation Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icisce/2020/640600b499/1x3kwc8YJxK", "parentPublication": { "id": "proceedings/icisce/2020/6406/0", "title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiqX", "title": "2018 IEEE 8th International Conference on Computational Advances in Bio and Medical Sciences (ICCABS)", "acronym": "iccabs", "groupId": "1800307", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WHONh4", "doi": "10.1109/ICCABS.2018.8542086", "title": "Inferring Relationships in Microbiomes from Signed Bayesian Networks", "normalizedTitle": "Inferring Relationships in Microbiomes from Signed Bayesian Networks", "abstract": "Microbe-microbe and host-microbe interactions in a microbiome play a vital role in both health and disease. However, the structure of the microbial community and the colonization patterns are highly complex to infer even under controlled wet laboratory conditions. In this study, we investigate what information, if any, can be provided by a Bayesian Network (BN) about a microbial community. Unlike the previously proposed Co-occurrence Networks (CoNs), BNs are based on conditional dependencies and can help in revealing complex associations. In this paper, we report a surprising association between directed edges in BNs and known colonization order. Furthermore, when combined with the sign of the correlations from CoNs, BNs allow for many useful conclusions.In this paper, we ask the pertinent question: what can BNs reveal about the relationships between microbial taxa in a microbiome? In particular, we argue that BNs are able to capture temporal order (colonization order) when combined with the sign of correlation. The main contribution of this paper is to show how to carefully use BNs in conjunction with CoNs to make potential inferences about colonization order.We carried out our experiments on two datasets. The first is oral microbiome data from the Human Microbiome Project (HMP) [1], which included eight different sites all from within the oral cavity from 242 healthy adults (129 males, 113 females). 
The second is data from preterm infant gut microbiome samples as described in the paper by La Rosa et al. [2]. A total of 922 stool samples from 58 premature babies (each weighing ≤ 1500 g at birth) collected on different days were used for our analysis.", "abstracts": [ { "abstractType": "Regular", "content": "Microbe-microbe and host-microbe interactions in a microbiome play a vital role in both health and disease. However, the structure of the microbial community and the colonization patterns are highly complex to infer even under controlled wet laboratory conditions. In this study, we investigate what information, if any, can be provided by a Bayesian Network (BN) about a microbial community. Unlike the previously proposed Co-occurrence Networks (CoNs), BNs are based on conditional dependencies and can help in revealing complex associations. In this paper, we report a surprising association between directed edges in BNs and known colonization order. Furthermore, when combined with the sign of the correlations from CoNs, BNs allow for many useful conclusions.In this paper, we ask the pertinent question: what can BNs reveal about the relationships between microbial taxa in a microbiome? In particular, we argue that BNs are able to capture temporal order (colonization order) when combined with the sign of correlation. The main contribution of this paper is to show how to carefully use BNs in conjunction with CoNs to make potential inferences about colonization order.We carried out our experiments on two datasets. The first is oral microbiome data from the Human Microbiome Project (HMP) [1], which included eight different sites all from within the oral cavity from 242 healthy adults (129 males, 113 females). The second is data from preterm infant gut microbiome samples as described in the paper by La Rosa et al. [2]. 
A total of 922 stool samples from 58 premature babies (each weighing ≤ 1500 g at birth) collected on different days were used for our analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Microbe-microbe and host-microbe interactions in a microbiome play a vital role in both health and disease. However, the structure of the microbial community and the colonization patterns are highly complex to infer even under controlled wet laboratory conditions. In this study, we investigate what information, if any, can be provided by a Bayesian Network (BN) about a microbial community. Unlike the previously proposed Co-occurrence Networks (CoNs), BNs are based on conditional dependencies and can help in revealing complex associations. In this paper, we report a surprising association between directed edges in BNs and known colonization order. Furthermore, when combined with the sign of the correlations from CoNs, BNs allow for many useful conclusions.In this paper, we ask the pertinent question: what can BNs reveal about the relationships between microbial taxa in a microbiome? In particular, we argue that BNs are able to capture temporal order (colonization order) when combined with the sign of correlation. The main contribution of this paper is to show how to carefully use BNs in conjunction with CoNs to make potential inferences about colonization order.We carried out our experiments on two datasets. The first is oral microbiome data from the Human Microbiome Project (HMP) [1], which included eight different sites all from within the oral cavity from 242 healthy adults (129 males, 113 females). The second is data from preterm infant gut microbiome samples as described in the paper by La Rosa et al. [2]. 
A total of 922 stool samples from 58 premature babies (each weighing ≤ 1500 g at birth) collected on different days were used for our analysis.", "fno": "08542086", "keywords": [ "Bioinformatics", "Bayes Methods", "Correlation", "Pediatrics", "Diseases", "Cavity Resonators", "Indexes", "Bayesian Networks", "16 S R RNA", "Microbiome", "Colonization", "PC Stable" ], "authors": [ { "affiliation": "Bioinformatics Research Group (BioRG), Florida International University, Miami, FL, 33199, USA", "fullName": "Musfiqur Rahman Sazal", "givenName": "Musfiqur Rahman", "surname": "Sazal", "__typename": "ArticleAuthorType" }, { "affiliation": "Bioinformatics Research Group (BioRG), Florida International University, Miami, FL, 33199, USA", "fullName": "Daniel Ruiz-Perez", "givenName": "Daniel", "surname": "Ruiz-Perez", "__typename": "ArticleAuthorType" }, { "affiliation": "Bioinformatics Research Group (BioRG), Florida International University, Miami, FL, 33199, USA", "fullName": "Trevor Cickovski", "givenName": "Trevor", "surname": "Cickovski", "__typename": "ArticleAuthorType" }, { "affiliation": "Bioinformatics Research Group (BioRG), Florida International University, Miami, FL, 33199, USA", "fullName": "Giri Narasimhan", "givenName": "Giri", "surname": "Narasimhan", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccabs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2018", "issn": null, "isbn": "978-1-5386-8520-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08542038", "articleId": "17D45XzbnKM", "__typename": "AdjacentArticleType" }, "next": { "fno": "08542043", "articleId": "17D45WgziTh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acsat/2015/0423/0/07478725", "title": "Cuckoo 
Search-Based Bayesian Networks for Medical Estimation System", "doi": null, "abstractUrl": "/proceedings-article/acsat/2015/07478725/12OmNAu1Fnw", "parentPublication": { "id": "proceedings/acsat/2015/0423/0", "title": "2015 4th International Conference on Advanced Computer Science Applications and Technologies (ACSAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icime/2009/3595/0/3595a450", "title": "Using Bayesian Networks for Bankruptcy Prediction: Empirical Evidence from Iranian Companies", "doi": null, "abstractUrl": "/proceedings-article/icime/2009/3595a450/12OmNvqEvJo", "parentPublication": { "id": "proceedings/icime/2009/3595/0", "title": "Information Management and Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/grc/2014/5464/0/06982831", "title": "Inferring gene regulatory networks from perturbed gene expression data using a dynamic Bayesian network with a Markov Chain Monte Carlo algorithm", "doi": null, "abstractUrl": "/proceedings-article/grc/2014/06982831/12OmNxXUhMw", "parentPublication": { "id": "proceedings/grc/2014/5464/0", "title": "2014 IEEE International Conference on Granular Computing (GrC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/119P1C11", "title": "Sparse Bayesian multi-task learning for predicting cognitive outcomes from neuroimaging measures in Alzheimer's disease", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/119P1C11/12OmNyYDDIZ", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2018/6217/0/247100a014", "title": "Stratification of Human Gut Microiome and Building a SVM-Based Classifier", "doi": null, 
"abstractUrl": "/proceedings-article/bibe/2018/247100a014/17D45WrVg2H", "parentPublication": { "id": "proceedings/bibe/2018/6217/0", "title": "2018 IEEE 18th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/5555/01/09882379", "title": "Critique of &#x201C;A Parallel Framework for Constraint-Based Bayesian Network Learning via Markov Blanket Discovery&#x201D; by SCC Team from ShanghaiTech University", "doi": null, "abstractUrl": "/journal/td/5555/01/09882379/1GwNXJ8yuek", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/5555/01/09933728", "title": "Critique of: &#x201D;A Parallel Framework for Constraint-Based Bayesian Network Learning via Markov Blanket Discovery&#x201D; by SCC Team from UC San Diego", "doi": null, "abstractUrl": "/journal/td/5555/01/09933728/1HVsB2wC0da", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995639", "title": "Inferring microbe-metabolite interactions by heterogeneous network fusion based on graph convolution network", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995639/1JC2zAbD29i", "parentPublication": { "id": "proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsic/2022/5488/0/548800a045", "title": "Application of prototypical networks in microbiome-based disease prediction", "doi": null, "abstractUrl": "/proceedings-article/iscsic/2022/548800a045/1LvAo5DG2XK", "parentPublication": { "id": 
"proceedings/iscsic/2022/5488/0", "title": "2022 6th International Symposium on Computer Science and Intelligent Control (ISCSIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }