data
dict
{ "proceeding": { "id": "12OmNx7ouU1", "title": "2010 International Conference on Cyberworlds", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNvjyy3K", "doi": "10.1109/CW.2010.71", "title": "NHE: Collaborative Virtual Environment with Augmented Reality on Web", "normalizedTitle": "NHE: Collaborative Virtual Environment with Augmented Reality on Web", "abstract": "Interfaces in two dimensions, like buttons and menus, are been used for 35 years. Technologies have been developed to extend interfaces for tridimensional environment. One of them, called Augmented Reality, is being viewed due to the ease on interaction with the virtual environment. By other side, due to complexity of human tasks, people are getting together to perform tasks through collaborative groups, named groupware. This article proposes a system that does collaboration and has easy interactivity and immersion, by using augmented reality resources. This integration is not so easy to find out on current market, and it is a great motivation for this innovation. The project can help several areas like, for example, distance education, engineering, architecture and marketing. Results show the viability of the system, and its efficiency in applications that needs easy manipulation of projects and high degree of immersion of users, offering facility to activities at real time, without network congestion and in a collaborative way.", "abstracts": [ { "abstractType": "Regular", "content": "Interfaces in two dimensions, like buttons and menus, are been used for 35 years. Technologies have been developed to extend interfaces for tridimensional environment. One of them, called Augmented Reality, is being viewed due to the ease on interaction with the virtual environment. By other side, due to complexity of human tasks, people are getting together to perform tasks through collaborative groups, named groupware. 
This article proposes a system that does collaboration and has easy interactivity and immersion, by using augmented reality resources. This integration is not so easy to find out on current market, and it is a great motivation for this innovation. The project can help several areas like, for example, distance education, engineering, architecture and marketing. Results show the viability of the system, and its efficiency in applications that needs easy manipulation of projects and high degree of immersion of users, offering facility to activities at real time, without network congestion and in a collaborative way.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Interfaces in two dimensions, like buttons and menus, are been used for 35 years. Technologies have been developed to extend interfaces for tridimensional environment. One of them, called Augmented Reality, is being viewed due to the ease on interaction with the virtual environment. By other side, due to complexity of human tasks, people are getting together to perform tasks through collaborative groups, named groupware. This article proposes a system that does collaboration and has easy interactivity and immersion, by using augmented reality resources. This integration is not so easy to find out on current market, and it is a great motivation for this innovation. The project can help several areas like, for example, distance education, engineering, architecture and marketing. Results show the viability of the system, and its efficiency in applications that needs easy manipulation of projects and high degree of immersion of users, offering facility to activities at real time, without network congestion and in a collaborative way.", "fno": "4215a438", "keywords": [ "Virtual Collaborative Spaces", "Networked Collaboration", "Computer Vision", "Augmented", "Mixed And Virtual Reality" ], "authors": [ { "affiliation": null, "fullName": "Anderson Carlos M. 
Tavares", "givenName": "Anderson Carlos M.", "surname": "Tavares", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sérgio Murilo M. Fernandes", "givenName": "Sérgio Murilo M.", "surname": "Fernandes", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Maria Lencastre P. de Menezes Cruz", "givenName": "Maria", "surname": "Lencastre P. de Menezes Cruz", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "438-444", "year": "2010", "issn": null, "isbn": "978-0-7695-4215-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4215a430", "articleId": "12OmNyQGSjF", "__typename": "AdjacentArticleType" }, "next": { "fno": "4215a445", "articleId": "12OmNCbkQDI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/2007/2900/0/29000156", "title": "Coordinated and Multiple Views in Augmented Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/iv/2007/29000156/12OmNs4S8Jo", "parentPublication": { "id": "proceedings/iv/2007/2900/0", "title": "2007 11th International Conference Information Visualization (IV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a077", "title": "Preliminary Evaluation of an Augmented Reality Collaborative Process Modelling System", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a077/12OmNvlg8kc", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/u-media/2011/4493/0/4493a253", "title": "The Feasibility of Augmented Reality on 
Virtual Tourism Website", "doi": null, "abstractUrl": "/proceedings-article/u-media/2011/4493a253/12OmNvnOwyH", "parentPublication": { "id": "proceedings/u-media/2011/4493/0", "title": "International Conference on Ubi-Media Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444792", "title": "Mixed reality in virtual world teleconferencing", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444792/12OmNwpoFEM", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892355", "title": "Design of collaborative 3D user interfaces for virtual and augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892355/12OmNxVV5WW", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a279", "title": "BrainChat - A Collaborative Augmented Reality Brain Interface for Message Communication", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a279/12OmNzIUfWo", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699316", "title": "Inverse Augmented Reality: A Virtual Agent's Perspective", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699316/19F1UA1hw40", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798299", "title": "Extending a User Involvement Tool with Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798299/1cJ0Yi1HRYI", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2020/03/08993789", "title": "Augmented and Virtual Reality in Surgery", "doi": null, "abstractUrl": "/magazine/cs/2020/03/08993789/1hkQPiQFzsQ", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2021/02/09475904", "title": "Multimedia in Virtual Reality and Augmented Reality", "doi": null, "abstractUrl": "/magazine/mu/2021/02/09475904/1v2M8hP6UpO", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNxVV5WW", "doi": "10.1109/VR.2017.7892355", "title": "Design of collaborative 3D user interfaces for virtual and augmented reality", "normalizedTitle": "Design of collaborative 3D user interfaces for virtual and augmented reality", "abstract": "We explore design approaches for cooperative work in virtual manipulation tasks. We seek to understand the fundamental aspects of the human cooperation and design interfaces and manipulation actions to enhance the group's ability to solve complex manipulation tasks in various immersion scenarios.", "abstracts": [ { "abstractType": "Regular", "content": "We explore design approaches for cooperative work in virtual manipulation tasks. We seek to understand the fundamental aspects of the human cooperation and design interfaces and manipulation actions to enhance the group's ability to solve complex manipulation tasks in various immersion scenarios.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We explore design approaches for cooperative work in virtual manipulation tasks. 
We seek to understand the fundamental aspects of the human cooperation and design interfaces and manipulation actions to enhance the group's ability to solve complex manipulation tasks in various immersion scenarios.", "fno": "07892355", "keywords": [ "Three Dimensional Displays", "Collaboration", "User Interfaces", "Virtual Environments", "Visualization", "Augmented Reality", "H 5 2 Information Interfaces And Presentation User Interfaces Input Devices And Strategies", "H 5 3 Information Interfaces And Presentation Group And Organization Interfaces Computer Supported Cooperative Work" ], "authors": [ { "affiliation": "Institute of Informatics, Federal University of Rio Grande do Sul, Brazil", "fullName": "Jeronimo G Grandi", "givenName": "Jeronimo G", "surname": "Grandi", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "419-420", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892354", "articleId": "12OmNB1wkLS", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892356", "articleId": "12OmNxYL5bz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/svr/2012/4725/0/4725a100", "title": "Integration Framework of Augmented Reality and Tangible Interfaces for Enhancing the User Interaction", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a100/12OmNscfI0t", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2010/4215/0/4215a438", "title": "NHE: Collaborative Virtual Environment with 
Augmented Reality on Web", "doi": null, "abstractUrl": "/proceedings-article/cw/2010/4215a438/12OmNvjyy3K", "parentPublication": { "id": "proceedings/cw/2010/4215/0", "title": "2010 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a077", "title": "Preliminary Evaluation of an Augmented Reality Collaborative Process Modelling System", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a077/12OmNvlg8kc", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2013/2240/0/06785819", "title": "Optometry training simulation with augmented reality and haptics", "doi": null, "abstractUrl": "/proceedings-article/asonam/2013/06785819/12OmNzYwc1w", "parentPublication": { "id": "proceedings/asonam/2013/2240/0", "title": "2013 International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ic/2013/06/mic2013060066", "title": "Augmented Reality Interfaces", "doi": null, "abstractUrl": "/magazine/ic/2013/06/mic2013060066/13rRUIJcWhZ", "parentPublication": { "id": "mags/ic", "title": "IEEE Internet Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/01/ttg2010010004", "title": "Opportunistic Tangible User Interfaces for Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2010/01/ttg2010010004/13rRUwvT9gn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2014/06/mcg2014060074", "title": "Spatial User Interfaces for Large-Scale Projector-Based Augmented 
Reality", "doi": null, "abstractUrl": "/magazine/cg/2014/06/mcg2014060074/13rRUxjQyjM", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797893", "title": "Augmented Reality Interfaces for Semi-Autonomous Drones", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797893/1cJ0NJAEGQw", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798171", "title": "[DC] Dimensionality of Augmented Reality Spatial Interfaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798171/1cJ0UKlSMP6", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a778", "title": "Evaluating Object Manipulation Interaction Techniques in Mixed Reality: Tangible User Interfaces and Gesture", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a778/1tuBngWRAC4", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0V5mcpB6", "doi": "10.1109/VR.2019.8797972", "title": "Evaluation of Maslows Hierarchy of Needs on Long-Term Use of HMDs – A Case Study of Office Environment", "normalizedTitle": "Evaluation of Maslows Hierarchy of Needs on Long-Term Use of HMDs – A Case Study of Office Environment", "abstract": "Long-term exposure to VR will become more and more important, but what we need for long term immersion to meet users fundamental needs is still under-researched. In this paper, we apply the theory of Maslows Hierarchy of Needs to guide the design of VR for longterm immersion based on the normal biological rhythm of human beings (24 hours). An office environment is designed to verify those needs. The efficiency, the physical and the psychological effects of this VR office system are tested. The results show that the VR office environment is as comfortable as the physical environment at short-term immersion and it can support users basic immersion. It means that the Maslows Hierarchy of Needs can be a guideline for long-term immersion.", "abstracts": [ { "abstractType": "Regular", "content": "Long-term exposure to VR will become more and more important, but what we need for long term immersion to meet users fundamental needs is still under-researched. In this paper, we apply the theory of Maslows Hierarchy of Needs to guide the design of VR for longterm immersion based on the normal biological rhythm of human beings (24 hours). An office environment is designed to verify those needs. The efficiency, the physical and the psychological effects of this VR office system are tested. 
The results show that the VR office environment is as comfortable as the physical environment at short-term immersion and it can support users basic immersion. It means that the Maslows Hierarchy of Needs can be a guideline for long-term immersion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Long-term exposure to VR will become more and more important, but what we need for long term immersion to meet users fundamental needs is still under-researched. In this paper, we apply the theory of Maslows Hierarchy of Needs to guide the design of VR for longterm immersion based on the normal biological rhythm of human beings (24 hours). An office environment is designed to verify those needs. The efficiency, the physical and the psychological effects of this VR office system are tested. The results show that the VR office environment is as comfortable as the physical environment at short-term immersion and it can support users basic immersion. It means that the Maslows Hierarchy of Needs can be a guideline for long-term immersion.", "fno": "08797972", "keywords": [ "Helmet Mounted Displays", "Psychology", "Virtual Reality", "Long Term Use", "Long Term Exposure", "Long Term Immersion", "Human Beings", "VR Office System", "VR Office Environment", "Physical Environment", "Short Term Immersion", "Users Basic Immersion", "Long Term Immersion", "Maslows Hierarchy Of Needs", "HMD", "Image Processing", "Psychology", "Employment", "Virtual Reality", "Training", "Solid Modeling", "Biology", "Maslows Hierarchy Of Needs", "Long Term Immersion", "Virtual Office", "Human Centered Computing X 2014 Human Computer Interaction X 2014 Interaction Paradigms X 2014 Virtual Reality", "Human Centered Computing X 2014 Interaction Design X 2014 Interaction Design Theory Concepts And Paradigms" ], "authors": [ { "affiliation": "Beijing Institute of Technology, China", "fullName": "Jie Guo", "givenName": "Jie", "surname": "Guo", "__typename": "ArticleAuthorType" }, { 
"affiliation": "AICFVE of Beijing Film Academy, Beijing Institute of Technology, China", "fullName": "Dongdong Weng", "givenName": "Dongdong", "surname": "Weng", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Institute of Technology, China", "fullName": "Zhenliang Zhang", "givenName": "Zhenliang", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "AICFVE of Beijing Film Academy, Beijing Institute of Technology, China", "fullName": "Yue Liu", "givenName": "Yue", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "AICFVE of Beijing Film Academy, Beijing Institute of Technology, China", "fullName": "Yongtian Wang", "givenName": "Yongtian", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "948-949", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797971", "articleId": "1cJ1b26beEg", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797905", "articleId": "1cJ0PcNhP1K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2017/2089/0/2089a150", "title": "A Comparison of Audio Models for Virtual Reality Video", "doi": null, "abstractUrl": "/proceedings-article/cw/2017/2089a150/12OmNyuPL77", "parentPublication": { "id": "proceedings/cw/2017/2089/0", "title": "2017 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504747", "title": "Immersion at scale: Researcher's guide to ecologically valid mobile experiments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504747/12OmNz6iO8t", 
"parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446288", "title": "Smart Adaptation of BIM for Virtual Reality, Depending on Building Project Actors' Needs: The Nursery Case", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446288/13bd1tMztYr", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714043", "title": "Studying the Effects of Congruence of Auditory and Visual Stimuli on Virtual Reality Experiences", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714043/1B0Y2dBeUi4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a085", "title": "Investigating Lighting Quality in Office Workstations: A Combined Approach Utilizing Virtual Reality and Physical Workstations", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a085/1CJe1WgruTe", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/taai/2021/0825/0/082500a290", "title": "The Needs Analysis of Virtual Exergaming", "doi": null, "abstractUrl": "/proceedings-article/taai/2021/082500a290/1DBZvbw010s", "parentPublication": { "id": "proceedings/taai/2021/0825/0", "title": "2021 International Conference on Technologies and Applications of Artificial Intelligence (TAAI)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/ta/5555/01/09792298", "title": "Virtual Reality for Emotion Elicitation – A Review", "doi": null, "abstractUrl": "/journal/ta/5555/01/09792298/1E5Lu3wZrZm", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icekim/2022/1666/0/166600a231", "title": "Application of VR Technology in Virtual Simulation Experiment Teaching", "doi": null, "abstractUrl": "/proceedings-article/icekim/2022/166600a231/1KpBw9u2vT2", "parentPublication": { "id": "proceedings/icekim/2022/1666/0", "title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a311", "title": "Affective Virtual Reality System (AVRS): Design and Ratings of Affective VR Scenes", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a311/1ap5C3hrD6o", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943608", "title": "Mixed Reality Office System Based on Maslow’s Hierarchy of Needs: Towards the Long-Term Immersion in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943608/1grOLEsKdnG", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gRxdWFgE3m", "title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)", "acronym": "itme", "groupId": "1002567", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gRxnkcOAak", "doi": "10.1109/ITME.2019.00033", "title": "Data Mining in Cognitive Function Training of Depression Patients Applications", "normalizedTitle": "Data Mining in Cognitive Function Training of Depression Patients Applications", "abstract": "Objective: To study how to use virtual reality technology (VR) to improve the cognitive function of depressive patients. This study explored the process of the diagnosis, training and evaluation of cognitive impairment in patients with depression, and provided new training methods and research ideas for improving cognitive impairment and prognosis of patients with depression. Methods: 30 mild to moderate depression subjects were randomly assigned to the experimental group, and 32 mild to moderate depression subjects were randomly assigned to the control group. The subjects in the experimental group were trained and treated with immersion virtual reality system for six months, while the subjects in the control group did not receive any treatment. MoCA scale and corresponding diagnostic criteria were used to assess the cognitive baseline level before treatment and the cognitive improvement after treatment. Result: The subjects in this study had mild to moderate depression, and the depression was similar in different genders, professions and groups. Before the cognitive training, there was no difference in the scores of MoCA scale between the experimental group and the control group, t = 0.2, P = 0.84. After the cognitive training, there was a significant difference in the scores of MoCA scale between the experimental group and the control group, t = 4.36, P = 0.00. 
This shows that the cognitive function training and treatment based on VR technology can effectively improve the cognitive level of depressive patients. Conclusion: Using virtual reality technology to train the cognitive ability of mild to moderate depression subjects can significantly improve the cognitive ability of the subjects, which is worthy of clinical application and promotion.", "abstracts": [ { "abstractType": "Regular", "content": "Objective: To study how to use virtual reality technology (VR) to improve the cognitive function of depressive patients. This study explored the process of the diagnosis, training and evaluation of cognitive impairment in patients with depression, and provided new training methods and research ideas for improving cognitive impairment and prognosis of patients with depression. Methods: 30 mild to moderate depression subjects were randomly assigned to the experimental group, and 32 mild to moderate depression subjects were randomly assigned to the control group. The subjects in the experimental group were trained and treated with immersion virtual reality system for six months, while the subjects in the control group did not receive any treatment. MoCA scale and corresponding diagnostic criteria were used to assess the cognitive baseline level before treatment and the cognitive improvement after treatment. Result: The subjects in this study had mild to moderate depression, and the depression was similar in different genders, professions and groups. Before the cognitive training, there was no difference in the scores of MoCA scale between the experimental group and the control group, t = 0.2, P = 0.84. After the cognitive training, there was a significant difference in the scores of MoCA scale between the experimental group and the control group, t = 4.36, P = 0.00. This shows that the cognitive function training and treatment based on VR technology can effectively improve the cognitive level of depressive patients. 
Conclusion: Using virtual reality technology to train the cognitive ability of mild to moderate depression subjects can significantly improve the cognitive ability of the subjects, which is worthy of clinical application and promotion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Objective: To study how to use virtual reality technology (VR) to improve the cognitive function of depressive patients. This study explored the process of the diagnosis, training and evaluation of cognitive impairment in patients with depression, and provided new training methods and research ideas for improving cognitive impairment and prognosis of patients with depression. Methods: 30 mild to moderate depression subjects were randomly assigned to the experimental group, and 32 mild to moderate depression subjects were randomly assigned to the control group. The subjects in the experimental group were trained and treated with immersion virtual reality system for six months, while the subjects in the control group did not receive any treatment. MoCA scale and corresponding diagnostic criteria were used to assess the cognitive baseline level before treatment and the cognitive improvement after treatment. Result: The subjects in this study had mild to moderate depression, and the depression was similar in different genders, professions and groups. Before the cognitive training, there was no difference in the scores of MoCA scale between the experimental group and the control group, t = 0.2, P = 0.84. After the cognitive training, there was a significant difference in the scores of MoCA scale between the experimental group and the control group, t = 4.36, P = 0.00. This shows that the cognitive function training and treatment based on VR technology can effectively improve the cognitive level of depressive patients. 
Conclusion: Using virtual reality technology to train the cognitive ability of mild to moderate depression subjects can significantly improve the cognitive ability of the subjects, which is worthy of clinical application and promotion.", "fno": "391800a098", "keywords": [ "Cognition", "Data Mining", "Patient Rehabilitation", "Patient Treatment", "Virtual Reality", "Cognitive Function Training", "Depression Patients Applications", "Virtual Reality Technology", "Depressive Patients", "Cognitive Impairment", "Training Methods", "Depression Subjects", "Control Group", "Immersion Virtual Reality System", "Mo CA Scale", "Cognitive Baseline Level", "Cognitive Improvement", "Moderate Depression", "Professions", "Cognitive Training", "Cognitive Ability", "Training", "Depression", "Virtual Reality", "Diseases", "Psychology", "Tools", "VR Technology", "Depression", "Cognitive Function" ], "authors": [ { "affiliation": "Jiangxi University of Traditional Chinese Medici", "fullName": "Fei Li", "givenName": "Fei", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Jiangxi University of Traditional Chinese Medicine", "fullName": "Yajing Ding", "givenName": "Yajing", "surname": "Ding", "__typename": "ArticleAuthorType" } ], "idPrefix": "itme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-08-01T00:00:00", "pubType": "proceedings", "pages": "98-101", "year": "2019", "issn": null, "isbn": "978-1-7281-3918-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "391800a093", "articleId": "1gRxlGt2rGE", "__typename": "AdjacentArticleType" }, "next": { "fno": "391800a102", "articleId": "1gRxhiUjlpS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cbms/2006/2517/0/25170241", "title": "Development of a Computer-Aided Tool for Evaluation and Training in 3D 
Spatial Cognitive Function", "doi": null, "abstractUrl": "/proceedings-article/cbms/2006/25170241/12OmNAYXWGS", "parentPublication": { "id": "proceedings/cbms/2006/2517/0", "title": "19th IEEE Symposium on Computer-Based Medical Systems (CBMS'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2016/1611/0/07822702", "title": "An EEG-based study on coherence and brain networks in mild depression cognitive process", "doi": null, "abstractUrl": "/proceedings-article/bibm/2016/07822702/12OmNywxlNQ", "parentPublication": { "id": "proceedings/bibm/2016/1611/0", "title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isms/2013/4963/0/4963a397", "title": "Towards Virtual Therapy for Alcoholic Depression", "doi": null, "abstractUrl": "/proceedings-article/isms/2013/4963a397/12OmNzvz6M2", "parentPublication": { "id": "proceedings/isms/2013/4963/0", "title": "Intelligent Systems, Modelling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2018/12/08326510", "title": "Inferring Cognitive Wellness from Motor Patterns", "doi": null, "abstractUrl": "/journal/tk/2018/12/08326510/17D45XuDNFQ", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2019/3918/0/391800a210", "title": "Study on Intervention of Depression Patients Based on TCM Health Management Platform", "doi": null, "abstractUrl": "/proceedings-article/itme/2019/391800a210/1gRxiuzvchW", "parentPublication": { "id": "proceedings/itme/2019/3918/0", "title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2019/3918/0/391800a031", "title": "Application of Digital Mining Technology in the Treatment of Depressive Symptoms in Patients with Post-Stroke Depression", "doi": null, "abstractUrl": "/proceedings-article/itme/2019/391800a031/1gRxmjINaVO", "parentPublication": { "id": "proceedings/itme/2019/3918/0", "title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icphds/2020/8571/0/857100a351", "title": "Rumination Effects on Depression in Adolescents", "doi": null, "abstractUrl": "/proceedings-article/icphds/2020/857100a351/1rxht2DXIOs", "parentPublication": { "id": "proceedings/icphds/2020/8571/0", "title": "2020 International Conference on Public Health and Data Science (ICPHDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icphds/2020/8571/0/857100a360", "title": "Causes and Treatment of Adolescent Depression", "doi": null, "abstractUrl": "/proceedings-article/icphds/2020/857100a360/1rxhud7DsZO", "parentPublication": { "id": "proceedings/icphds/2020/8571/0", "title": "2020 International Conference on Public Health and Data Science (ICPHDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichci/2020/2316/0/231600a071", "title": "Research status of depression related to virtual reality technology in China", "doi": null, "abstractUrl": "/proceedings-article/ichci/2020/231600a071/1tuAaiSt2Gk", "parentPublication": { "id": "proceedings/ichci/2020/2316/0", "title": "2020 International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tcs/2021/2910/0/291000a253", "title": "Effects of 24 weeks progressive aerobic training on 
physical and mental health of male drug addicts with different depression levels : —Data analysis based on smart big data acquisition system for drug treatment", "doi": null, "abstractUrl": "/proceedings-article/tcs/2021/291000a253/1wRIaqRqati", "parentPublication": { "id": "proceedings/tcs/2021/2910/0", "title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxnENRZCM", "doi": "10.1109/VRW50115.2020.00236", "title": "Effect of marker location on user detection in omnidirectional images", "normalizedTitle": "Effect of marker location on user detection in omnidirectional images", "abstract": "Omnidirectional images have seen increasing usage in virtual tours to display remote destinations. These applications use markers or landmarks within the images to drive user interaction. Viewers must be able to efficiently locate and interact with markers for a positive user experience that retains immersion. However, the effect of marker positioning at different spatial locations on user performance remains unstudied. This work studies the positioning of visual markers within an omnidirectional image environment at three different elevation ranges. Our results show that markers positioned less than 32 ° from the equator were found significantly faster than markers between 32 ° and 64 °.", "abstracts": [ { "abstractType": "Regular", "content": "Omnidirectional images have seen increasing usage in virtual tours to display remote destinations. These applications use markers or landmarks within the images to drive user interaction. Viewers must be able to efficiently locate and interact with markers for a positive user experience that retains immersion. However, the effect of marker positioning at different spatial locations on user performance remains unstudied. This work studies the positioning of visual markers within an omnidirectional image environment at three different elevation ranges. 
Our results show that markers positioned less than 32 ° from the equator were found significantly faster than markers between 32 ° and 64 °.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Omnidirectional images have seen increasing usage in virtual tours to display remote destinations. These applications use markers or landmarks within the images to drive user interaction. Viewers must be able to efficiently locate and interact with markers for a positive user experience that retains immersion. However, the effect of marker positioning at different spatial locations on user performance remains unstudied. This work studies the positioning of visual markers within an omnidirectional image environment at three different elevation ranges. Our results show that markers positioned less than 32 ° from the equator were found significantly faster than markers between 32 ° and 64 °.", "fno": "09090636", "keywords": [ "Virtual Reality", "Virtual Environments", "Performance Evaluation", "Omnidirectional Images", "360 Degree Panorama", "Virtual Reality", "Marker Positioning", "Human Centered Computing", "Systems And Tools For Interaction Design", "Computing Methodologies", "Virtual Reality" ], "authors": [ { "affiliation": "University of Florida", "fullName": "Ricardo Eiris", "givenName": "Ricardo", "surname": "Eiris", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Florida", "fullName": "Brendan John", "givenName": "Brendan", "surname": "John", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Florida", "fullName": "Eakta Jain", "givenName": "Eakta", "surname": "Jain", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Florida", "fullName": "Masoud Gheisari", "givenName": "Masoud", "surname": "Gheisari", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": 
"proceedings", "pages": "770-771", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090633", "articleId": "1jIxsrAlhsY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090468", "articleId": "1jIxlaoW1UY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pg/2002/1784/0/17840318", "title": "Wandering in VR Environments by Estimating Head Pose Using an Omnicam", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840318/12OmNBtCCJD", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2007/1630/0/04409209", "title": "Circle-Marker Detection Method for Omnidirectional Images and its Application to Robot Positioning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2007/04409209/12OmNCdk2Ek", "parentPublication": { "id": "proceedings/iccv/2007/1630/0", "title": "2007 11th IEEE International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2017/6721/0/07923755", "title": "Omnidirectional Video Quality Metrics and Evaluation Process", "doi": null, "abstractUrl": "/proceedings-article/dcc/2017/07923755/12OmNxTEiQm", "parentPublication": { "id": "proceedings/dcc/2017/6721/0", "title": "2017 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a421", "title": "View-Dependent Omnidirectional Video Encapsulation Using Multiple Tracks", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a421/1ap5yq7z67u", "parentPublication": { "id": 
"proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797777", "title": "Exploration of Large Omnidirectional Images in Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797777/1cJ0JISlXDG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798323", "title": "“Ready Player One”: Enhancing Omnidirectional Treadmills for Use in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798323/1cJ0K4EVowE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798067", "title": "OmniMR: Omnidirectional Mixed Reality with Spatially-Varying Environment Reflections from Moving 360° Video Cameras", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798067/1cJ1cnBEFb2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089561", "title": "Real Walking in Place: HEX-CORE-PROTOTYPE Omnidirectional Treadmill", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089561/1jIxfncHjNe", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icmew/2020/1485/0/09106059", "title": "Field-Of-View Effect on The Perceived Quality of Omnidirectional Images", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106059/1kwqECCHmus", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a143", "title": "A Technical Report for Visual Attention Estimation in HMD Challenge", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a143/1zxLvkLF5ew", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pq9ZGK7miA", "title": "2020 International Conference on Computing and Data Science (CDS)", "acronym": "cds", "groupId": "1838884", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pqa4RCdUAg", "doi": "10.1109/CDS49703.2020.00079", "title": "A virtual environment making method for CAVE system", "normalizedTitle": "A virtual environment making method for CAVE system", "abstract": "CAVE immersive system is a large VR system with high immersion and good interaction means. CAVE can integrate vision, touch, sound, etc., and is widely used in architecture display, equipment structure research, simulation training, scientific research and other fields with high requirements for space sense and visual effect. According to the structure and number of projection screens, the virtual environment in CAVE immersion system need to display multi screen images synchronously. But the existing multi screen display methods are too complex, and even require paid plugins. In order to solved this problem, a method of making CAVE immersive virtual environment based on unity 3D was proposed. This method was very simple and could create virtual reality applications suitable for various CAVE systems quickly.", "abstracts": [ { "abstractType": "Regular", "content": "CAVE immersive system is a large VR system with high immersion and good interaction means. CAVE can integrate vision, touch, sound, etc., and is widely used in architecture display, equipment structure research, simulation training, scientific research and other fields with high requirements for space sense and visual effect. According to the structure and number of projection screens, the virtual environment in CAVE immersion system need to display multi screen images synchronously. But the existing multi screen display methods are too complex, and even require paid plugins. 
In order to solved this problem, a method of making CAVE immersive virtual environment based on unity 3D was proposed. This method was very simple and could create virtual reality applications suitable for various CAVE systems quickly.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "CAVE immersive system is a large VR system with high immersion and good interaction means. CAVE can integrate vision, touch, sound, etc., and is widely used in architecture display, equipment structure research, simulation training, scientific research and other fields with high requirements for space sense and visual effect. According to the structure and number of projection screens, the virtual environment in CAVE immersion system need to display multi screen images synchronously. But the existing multi screen display methods are too complex, and even require paid plugins. In order to solved this problem, a method of making CAVE immersive virtual environment based on unity 3D was proposed. 
This method was very simple and could create virtual reality applications suitable for various CAVE systems quickly.", "fno": "710600a377", "keywords": [ "Virtual Reality", "Virtual Environment Making Method", "CAVE System", "CAVE Immersive System", "VR System", "Architecture Display", "Equipment Structure Research", "Simulation Training", "Scientific Research", "Projection Screens", "CAVE Immersion System Need", "Multiscreen Images", "CAVE Immersive Virtual Environment", "Virtual Reality Applications", "Multiscreen Display Methods", "Cameras", "Virtual Environments", "Three Dimensional Displays", "Virtual Reality", "Games", "Tools", "Image Resolution", "Virtual Reality", "CAVE Immersive System", "Unity 3 D", "Multi Screen Display" ], "authors": [ { "affiliation": "JiLin Animation institute,Engineering Research Center,Changchun,China", "fullName": "Liguo Zheng", "givenName": "Liguo", "surname": "Zheng", "__typename": "ArticleAuthorType" }, { "affiliation": "JiLin Animation institute,School of game,Changchun,China", "fullName": "Meili Zhu", "givenName": "Meili", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Branch of JAI Cultural Arts Group,Beijing,China", "fullName": "Hongwei Yu", "givenName": "Hongwei", "surname": "Yu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cds", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-08-01T00:00:00", "pubType": "proceedings", "pages": "377-380", "year": "2020", "issn": null, "isbn": "978-1-7281-7106-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "710600a373", "articleId": "1pqa08cnNlu", "__typename": "AdjacentArticleType" }, "next": { "fno": "710600a381", "articleId": "1pqa0LBMT0Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/nbis/2009/3767/0/3767a608", "title": 
"Tele-Immersion Environment for Video Avatar Based CVE", "doi": null, "abstractUrl": "/proceedings-article/nbis/2009/3767a608/12OmNB06l11", "parentPublication": { "id": "proceedings/nbis/2009/3767/0", "title": "2009 International Conference on Network-Based Information Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1993/4910/0/00378262", "title": "Scientists in wonderland: A report on visualization applications in the CAVE virtual reality environment", "doi": null, "abstractUrl": "/proceedings-article/vrais/1993/00378262/12OmNC8dg9j", "parentPublication": { "id": "proceedings/vrais/1993/4910/0", "title": "IEEE 1993 Symposium on Research Frontiers in Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2016/4149/0/4149a178", "title": "A CAVE/Desktop Collaborative Virtual Environment for Offshore Oil Platform Training", "doi": null, "abstractUrl": "/proceedings-article/svr/2016/4149a178/12OmNrJiCGH", "parentPublication": { "id": "proceedings/svr/2016/4149/0", "title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892342", "title": "Uni-CAVE: A Unity3D plugin for non-head mounted VR display systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892342/12OmNs5rkSv", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892331", "title": "Advertising perception with immersive virtual reality devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892331/12OmNvk7JO0", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcsw/2008/3173/0/3173a072", "title": "Collaborative and Multimodal Communications System Using Immersive Virtual Reality Environment over Ultrahigh-Speed Network", "doi": null, "abstractUrl": "/proceedings-article/icdcsw/2008/3173a072/12OmNxw5B5z", "parentPublication": { "id": "proceedings/icdcsw/2008/3173/0", "title": "2008 The 28th International Conference on Distributed Computing Systems Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549350", "title": "The impact of enhanced projector display on the responses of people to a violent scenario in immersive virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549350/12OmNzb7Znq", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539620", "title": "Immersive Collaborative Analysis of Network Connectivity: CAVE-style or Head-Mounted Display?", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539620/13rRUwcS1D0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/03/v0323", "title": "CAVE and Fishtank Virtual-Reality Displays: A Qualitative and Quantitative Comparison", "doi": null, "abstractUrl": "/journal/tg/2006/03/v0323/13rRUx0xPhZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/04/07384536", "title": "Examining Rotation Gain in CAVE-like Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2016/04/07384536/13rRUxOdD2H", "parentPublication": { "id": "trans/tg", 
"title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tnWwqMuCzu", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tnXudPdN4s", "doi": "10.1109/VRW52623.2021.00106", "title": "CAVE vs. HMD in Distance Perception", "normalizedTitle": "CAVE vs. HMD in Distance Perception", "abstract": "This study aims to analyze differences between a CAVE system and a Head-Mounted Display (HMD), two technologies presenting important differences, focusing on distance perception, as past research on this factor is usually carried with only one or the other device. We performed two experiments. First, we explored the impact of the HMD’s weight, by removing any other bias. Second, we compared distance perception using a simple hand interaction in a replicated environment. Results reveal that the HMD’s weight has no significant impact over short distances, and the usage of a virtual replica was found to improve distance perception.", "abstracts": [ { "abstractType": "Regular", "content": "This study aims to analyze differences between a CAVE system and a Head-Mounted Display (HMD), two technologies presenting important differences, focusing on distance perception, as past research on this factor is usually carried with only one or the other device. We performed two experiments. First, we explored the impact of the HMD’s weight, by removing any other bias. Second, we compared distance perception using a simple hand interaction in a replicated environment. 
Results reveal that the HMD’s weight has no significant impact over short distances, and the usage of a virtual replica was found to improve distance perception.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This study aims to analyze differences between a CAVE system and a Head-Mounted Display (HMD), two technologies presenting important differences, focusing on distance perception, as past research on this factor is usually carried with only one or the other device. We performed two experiments. First, we explored the impact of the HMD’s weight, by removing any other bias. Second, we compared distance perception using a simple hand interaction in a replicated environment. Results reveal that the HMD’s weight has no significant impact over short distances, and the usage of a virtual replica was found to improve distance perception.", "fno": "405700a448", "keywords": [ "Helmet Mounted Displays", "Human Computer Interaction", "Virtual Reality", "Wearable Computers", "Distance Perception", "CAVE System", "Head Mounted Display", "HMD", "Hand Interaction", "Performance Evaluation", "Meters", "Solid Modeling", "Three Dimensional Displays", "Conferences", "Resists", "Virtual Reality", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality" ], "authors": [ { "affiliation": "Arts et Metiers Institute of Technology, LISPEN, HESAM Université UBFC", "fullName": "Théo Combe", "givenName": "Théo", "surname": "Combe", "__typename": "ArticleAuthorType" }, { "affiliation": "Arts et Metiers Institute of Technology, LISPEN, HESAM Université UBFC", "fullName": "Jean-Rémy Chardonnet", "givenName": "Jean-Rémy", "surname": "Chardonnet", "__typename": "ArticleAuthorType" }, { "affiliation": "Arts et Metiers Institute of Technology, LISPEN, HESAM Université UBFC", "fullName": "Frédéric Merienne", "givenName": "Frédéric", "surname": "Merienne", "__typename": "ArticleAuthorType" }, { "affiliation": "Arts et Metiers 
Institute of Technology, LISPEN, HESAM Université, UBFC", "fullName": "Jivka Ovtcharova", "givenName": "Jivka", "surname": "Ovtcharova", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "448-449", "year": "2021", "issn": null, "isbn": "978-1-6654-4057-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "405700a446", "articleId": "1tnWN99G6TS", "__typename": "AdjacentArticleType" }, "next": { "fno": "405700a450", "articleId": "1tnWO8nmeeA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446551", "title": "A Demonstration of ShareVR: Co-Located Experiences for Virtual Reality Between HMD and Non-HMD Users", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446551/13bd1gzWkQD", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699267", "title": "Perception and Action in Peripersonal Space: A Comparison Between Video and Optical See-Through Augmented Reality Devices", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699267/19F1NuzXn9u", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09850416", "title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics", "doi": null, "abstractUrl": 
"/journal/tg/5555/01/09850416/1Fz4SPLVTMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798174", "title": "Comparison in Depth Perception between Virtual Reality and Augmented Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798174/1cJ11OY78k0", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797826", "title": "Virtual Objects Look Farther on the Sides: The Anisotropy of Distance Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797826/1cJ18Y9D9Di", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090527", "title": "Distance Perception in Modern Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090527/1jIxsZjczAc", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a462", "title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a462/1pysu9tPcGc", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a080", "title": "Can Retinal Projection Displays Improve Spatial Perception in Augmented Reality?", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a080/1pysvYTZF6w", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a542", "title": "Field of View Effect on Distance Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a542/1tnXQ9aew80", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a143", "title": "A Technical Report for Visual Attention Estimation in HMD Challenge", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a143/1zxLvkLF5ew", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuisc", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "acronym": "sibgrapi", "groupId": "1000131", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBLdKJ0", "doi": "10.1109/SIBGRAPI.2015.31", "title": "Exploratory Segmentation of Vector Fields Using Multidimensional Projection", "normalizedTitle": "Exploratory Segmentation of Vector Fields Using Multidimensional Projection", "abstract": "The difficulty to understand the complex behavior of vector fields makes its visual segmentation an area of constant interest in scientific visualization. In this paper, we present a novel interactive segmentation framework for discrete vector fields. In our method, the vector field domain is partitioned into multiple regions with same flow patterns. In order to accomplish this task, feature vectors are extracted from streamlines and mapped to a visual space using multidimensional projection. The interactivity with projected data in the visual space improves the results of the segmentation according to user's knowledge. The provided results and comparisons show the flexibility and effectiveness of our framework.", "abstracts": [ { "abstractType": "Regular", "content": "The difficulty to understand the complex behavior of vector fields makes its visual segmentation an area of constant interest in scientific visualization. In this paper, we present a novel interactive segmentation framework for discrete vector fields. In our method, the vector field domain is partitioned into multiple regions with same flow patterns. In order to accomplish this task, feature vectors are extracted from streamlines and mapped to a visual space using multidimensional projection. The interactivity with projected data in the visual space improves the results of the segmentation according to user's knowledge. 
The provided results and comparisons show the flexibility and effectiveness of our framework.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The difficulty to understand the complex behavior of vector fields makes its visual segmentation an area of constant interest in scientific visualization. In this paper, we present a novel interactive segmentation framework for discrete vector fields. In our method, the vector field domain is partitioned into multiple regions with same flow patterns. In order to accomplish this task, feature vectors are extracted from streamlines and mapped to a visual space using multidimensional projection. The interactivity with projected data in the visual space improves the results of the segmentation according to user's knowledge. The provided results and comparisons show the flexibility and effectiveness of our framework.", "fno": "7962a250", "keywords": [ "Visualization", "Feature Extraction", "Aerospace Electronics", "Histograms", "Image Segmentation", "Pipelines", "Interpolation", "Visualization", "Vector Field", "Segmentation", "Multidimensional Projection" ], "authors": [ { "affiliation": null, "fullName": "Danilo Motta", "givenName": "Danilo", "surname": "Motta", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Maria Oliveira", "givenName": "Maria", "surname": "Oliveira", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Paulo Pagliosa", "givenName": "Paulo", "surname": "Pagliosa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Luis Gustavo Nonato", "givenName": "Luis Gustavo", "surname": "Nonato", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Afonso Paiva", "givenName": "Afonso", "surname": "Paiva", "__typename": "ArticleAuthorType" } ], "idPrefix": "sibgrapi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-08-01T00:00:00", "pubType": "proceedings", 
"pages": "250-256", "year": "2015", "issn": "1530-1834", "isbn": "978-1-4673-7962-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7962a242", "articleId": "12OmNy2rRVA", "__typename": "AdjacentArticleType" }, "next": { "fno": "7962a257", "articleId": "12OmNqFJhEw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2003/2030/0/20300026", "title": "Clifford Convolution And Pattern Matching On Vector Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300026/12OmNBVrjmf", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2013/4797/0/06596153", "title": "Exploring vector fields with distribution-based streamline analysis", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2013/06596153/12OmNvAiSjV", "parentPublication": { "id": "proceedings/pacificvis/2013/4797/0", "title": "2013 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1996/3673/0/36730115", "title": "Raycasting Vector Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1996/36730115/12OmNvqmULT", "parentPublication": { "id": "proceedings/ieee-vis/1996/3673/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300032", "title": "Chameleon: An Interactive Texture-based Rendering Framework for Visualizing Three-dimensional Vector Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300032/12OmNzh5z0U", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization 
Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/03/ttg2012030407", "title": "Streamline Embedding for 3D Vector Field Exploration", "doi": null, "abstractUrl": "/journal/tg/2012/03/ttg2012030407/13rRUwInvsM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/02/v0198", "title": "Topological Segmentation in Three-Dimensional Vector Fields", "doi": null, "abstractUrl": "/journal/tg/2004/02/v0198/13rRUxAASVM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061578", "title": "View-Dependent Streamlines for 3D Vector Fields", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061578/13rRUxASuGd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/10/ttg2012101717", "title": "Design of 2D Time-Varying Vector Fields", "doi": null, "abstractUrl": "/journal/tg/2012/10/ttg2012101717/13rRUxNEqPP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/03/v0289", "title": "Segmentation of Discrete Vector Fields", "doi": null, "abstractUrl": "/journal/tg/2006/03/v0289/13rRUxcbnCj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2018/9264/0/926400a337", "title": "Inverse Projection of Vector Fields", 
"doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2018/926400a337/17D45XeKgmH", "parentPublication": { "id": "proceedings/sibgrapi/2018/9264/0", "title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx8wTfL", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNCdk2uA", "doi": "10.1109/ICPR.2008.4761410", "title": "Robust and precise eye detection based on locally selective projection", "normalizedTitle": "Robust and precise eye detection based on locally selective projection", "abstract": "This paper proposes a robust and precise eye detection method based on a new projection algorithm called locally selective projection (LSP). Along each projection axis, LSP selects a pixel and uses a function calculated in the neighborhood of the pixel as response. The local selectivity of LSP makes it robust against rotation, illumination and occlusion. Moreover, the positions of selected pixels can be recorded, providing a 2D cue for image analysis. We apply LSP to eye detection. The vertical and horizontal LSPs are first used to give reliable eye candidates, then a SVM classifier is employed to verify the real eye pairs. The experiment result compared with an AdaBoost detector shows the robustness and accuracy of proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a robust and precise eye detection method based on a new projection algorithm called locally selective projection (LSP). Along each projection axis, LSP selects a pixel and uses a function calculated in the neighborhood of the pixel as response. The local selectivity of LSP makes it robust against rotation, illumination and occlusion. Moreover, the positions of selected pixels can be recorded, providing a 2D cue for image analysis. We apply LSP to eye detection. The vertical and horizontal LSPs are first used to give reliable eye candidates, then a SVM classifier is employed to verify the real eye pairs. 
The experiment result compared with an AdaBoost detector shows the robustness and accuracy of proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a robust and precise eye detection method based on a new projection algorithm called locally selective projection (LSP). Along each projection axis, LSP selects a pixel and uses a function calculated in the neighborhood of the pixel as response. The local selectivity of LSP makes it robust against rotation, illumination and occlusion. Moreover, the positions of selected pixels can be recorded, providing a 2D cue for image analysis. We apply LSP to eye detection. The vertical and horizontal LSPs are first used to give reliable eye candidates, then a SVM classifier is employed to verify the real eye pairs. The experiment result compared with an AdaBoost detector shows the robustness and accuracy of proposed method.", "fno": "04761410", "keywords": [ "Image Classification", "Object Detection", "Support Vector Machines", "Eye Detection", "Locally Selective Projection", "Image Analysis", "SVM Classifier", "Ada Boost Detector", "Robustness", "Projection Algorithms", "Lighting", "Face Detection", "Eyes", "Support Vector Machines", "Support Vector Machine Classification", "Detectors", "Robotics And Automation", "Pixel" ], "authors": [ { "affiliation": "Department of Automation, University of Science and Technology of China, China", "fullName": "Ying Zheng", "givenName": null, "surname": "Ying Zheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Automation, University of Science and Technology of China, China", "fullName": "Zengfu Wang", "givenName": null, "surname": "Zengfu Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "1051-4651", "isbn": 
"978-1-4244-2174-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04761409", "articleId": "12OmNBVIUso", "__typename": "AdjacentArticleType" }, "next": { "fno": "04761411", "articleId": "12OmNxwENls", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2000/0478/0/04780125", "title": "Eye Mark Pointer in Immersive Projection Display", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780125/12OmNqH9her", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2009/3859/3/3859c058", "title": "Eye Location Based on Gray Projection", "doi": null, "abstractUrl": "/proceedings-article/iita/2009/3859c058/12OmNqOOrJv", "parentPublication": { "id": "proceedings/iita/2009/3859/3", "title": "2009 Third International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/1/01315066", "title": "The world in an eye [eye image interpretation]", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315066/12OmNrEL2Cb", "parentPublication": { "id": "proceedings/cvpr/2004/2158/1", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2009/3634/4/3634d182", "title": "Fuzzy Support Vector Machine for Eye Expression Analysis", "doi": null, "abstractUrl": "/proceedings-article/icic/2009/3634d182/12OmNwMob5R", "parentPublication": { "id": "proceedings/icic/2009/3634/4", "title": "2009 Second International Conference on Information and Computing Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciapw/2007/2921/0/29210045", "title": "Detecting Eye Fixations by Projection Clustering", "doi": null, "abstractUrl": "/proceedings-article/iciapw/2007/29210045/12OmNxYbT28", "parentPublication": { "id": "proceedings/iciapw/2007/2921/0", "title": "2007 14th International Conference on Image Analysis and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdcloud/2015/7183/0/7183a252", "title": "Eye Detection Based on Integral Projection and Hough Round Transform", "doi": null, "abstractUrl": "/proceedings-article/bdcloud/2015/7183a252/12OmNy3AgzC", "parentPublication": { "id": "proceedings/bdcloud/2015/7183/0", "title": "2015 IEEE Fifth International Conference on Big Data and Cloud Computing (BDCloud)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/2/01394426", "title": "Real-time eye detection using face-circle fitting and dark-pixel filtering", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394426/12OmNy6HQWK", "parentPublication": { "id": "proceedings/icme/2004/8603/2", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284816", "title": "An Automatic Eye Wink Interpretation System for the Disable", "doi": null, "abstractUrl": 
"/proceedings-article/icme/2007/04284816/12OmNylboIE", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/4/252140731", "title": "A hybrid classifier for precise and robust eye detection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252140731/12OmNz3bdHJ", "parentPublication": { "id": "proceedings/icpr/2006/2521/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a594", "title": "High-speed Gaze-oriented Projection by Cross-ratio-based Eye Tracking with Dual Infrared Imaging", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a594/1CJewqWywOk", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNApu5n5", "title": "2016 29th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "acronym": "sibgrapi", "groupId": "1000131", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNxxdZLn", "doi": "10.1109/SIBGRAPI.2016.048", "title": "Understanding Attribute Variability in Multidimensional Projections", "normalizedTitle": "Understanding Attribute Variability in Multidimensional Projections", "abstract": "Multidimensional Projection techniques can help users to find patterns in multidimensional data. However, while the visualization literature is rich in techniques designed to improve the projection itself, only a handful of papers shed light into the attributes that contribute to cluster formation or the spread of projected data. In this paper, we present a web-based visualization tool that enriches multidimensional projection layout with statistical measures derived from inputted data. Given a set of regions to analyze, we used statistical measures, such as variance, to highlight relevant attributes that contribute to the points' similarities in each region. Experimental tests show that our technique can help identify important attributes and explain projected data.", "abstracts": [ { "abstractType": "Regular", "content": "Multidimensional Projection techniques can help users to find patterns in multidimensional data. However, while the visualization literature is rich in techniques designed to improve the projection itself, only a handful of papers shed light into the attributes that contribute to cluster formation or the spread of projected data. In this paper, we present a web-based visualization tool that enriches multidimensional projection layout with statistical measures derived from inputted data. 
Given a set of regions to analyze, we used statistical measures, such as variance, to highlight relevant attributes that contribute to the points' similarities in each region. Experimental tests show that our technique can help identify important attributes and explain projected data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multidimensional Projection techniques can help users to find patterns in multidimensional data. However, while the visualization literature is rich in techniques designed to improve the projection itself, only a handful of papers shed light into the attributes that contribute to cluster formation or the spread of projected data. In this paper, we present a web-based visualization tool that enriches multidimensional projection layout with statistical measures derived from inputted data. Given a set of regions to analyze, we used statistical measures, such as variance, to highlight relevant attributes that contribute to the points' similarities in each region. 
Experimental tests show that our technique can help identify important attributes and explain projected data.", "fno": "3568a297", "keywords": [ "Visualization", "Data Visualization", "Aerospace Electronics", "Manuals", "Two Dimensional Displays", "Dispersion", "Pipelines", "Interactive Visual Analysis", "Attribute Based Clustering", "High Dimensional Data Visualization" ], "authors": [ { "affiliation": null, "fullName": "Lucas Pagliosa", "givenName": "Lucas", "surname": "Pagliosa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Paulo Pagliosa", "givenName": "Paulo", "surname": "Pagliosa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Luis Gustavo Nonato", "givenName": "Luis Gustavo", "surname": "Nonato", "__typename": "ArticleAuthorType" } ], "idPrefix": "sibgrapi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-10-01T00:00:00", "pubType": "proceedings", "pages": "297-304", "year": "2016", "issn": "2377-5416", "isbn": "978-1-5090-3568-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3568a289", "articleId": "12OmNBSBk7B", "__typename": "AdjacentArticleType" }, "next": { "fno": "3568a305", "articleId": "12OmNyz5JQ2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sibgrapi/2015/7962/0/7962a250", "title": "Exploratory Segmentation of Vector Fields Using Multidimensional Projection", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2015/7962a250/12OmNBLdKJ0", "parentPublication": { "id": "proceedings/sibgrapi/2015/7962/0", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2012/4829/0/4829a032", "title": "Colorization by Multidimensional Projection", 
"doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2012/4829a032/12OmNBsLPdX", "parentPublication": { "id": "proceedings/sibgrapi/2012/4829/0", "title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2013/5099/0/5099a107", "title": "Multidimensional Projections to Explore Time-Varying Multivariate Volume Data", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2013/5099a107/12OmNrkT7Pm", "parentPublication": { "id": "proceedings/sibgrapi/2013/5099/0", "title": "2013 XXVI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2017/2219/0/2219a351", "title": "An Approach to Perform Local Analysis on Multidimensional Projection", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2017/2219a351/12OmNx4Q6AV", "parentPublication": { "id": "proceedings/sibgrapi/2017/2219/0", "title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ivapp/2014/8132/0/07294435", "title": "Role of human perception In Cluster-Based Visual Analysis Of Multidimensional Data Projections", "doi": null, "abstractUrl": "/proceedings-article/ivapp/2014/07294435/12OmNxdm4Ln", "parentPublication": { "id": "proceedings/ivapp/2014/8132/0", "title": "2014 International Conference on Information Visualization Theory and Applications (IVAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2014/2874/0/2874a209", "title": "Multidimensional Projection with Radial Basis Function and Control Points Selection", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2014/2874a209/12OmNy5hRfG", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": 
"2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/01/ttg2012010121", "title": "Modified Dendrogram of Attribute Space for Multidimensional Transfer Function Design", "doi": null, "abstractUrl": "/journal/tg/2012/01/ttg2012010121/13rRUxASuhw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bracis/2019/4253/0/425300a156", "title": "Multidimensional Projections Analysis Using Performance Evaluation Planning", "doi": null, "abstractUrl": "/proceedings-article/bracis/2019/425300a156/1fHkKP2SzEQ", "parentPublication": { "id": "proceedings/bracis/2019/4253/0", "title": "2019 8th Brazilian Conference on Intelligent Systems (BRACIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a174", "title": "A class-based evaluation approach to assess multidimensional projections", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a174/1rSR84yQrcI", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552206", "title": "Measuring and Explaining the Inter-Cluster Reliability of Multidimensional Projections", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552206/1xic9jxItoI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyFCvPp", "title": "Tenth International Conference on Information Visualisation (IV'06)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNzmLxON", "doi": "10.1109/IV.2006.122", "title": "Visual Mapping of Text Collections through a Fast High Precision Projection Technique", "normalizedTitle": "Visual Mapping of Text Collections through a Fast High Precision Projection Technique", "abstract": "This paper introduces Least Square Projection (LSP), a fast technique for projection of multi-dimensional data onto lower dimensions developed and tested successfully in the context of creation of text maps based on their content. Current solutions are either based on computationally expensive dimension reduction with no proper guarantee of the outcome or on faster techniques that need some sort of post-processing for recovering information lost during the process. LSP is based on least square approximation, a technique originally employed for surface modeling and reconstruction. Least square approximations are capable of computing the coordinates of a set of projected points based on a reduced number of control points with defined geometry. We extend the concept for general data sets. In order to perform the projection, a small number of distance calculations is necessary and no repositioning of the final points is required to obtain a satisfactory precision of the final solution. Textual information is a typically difficult data type to handle, due to its intrinsic dimensionality. 
We employ document corpora as a benchmark to demonstrate the capabilities of the LSP to group and separate documents by their content with high precision.", "abstracts": [ { "abstractType": "Regular", "content": "This paper introduces Least Square Projection (LSP), a fast technique for projection of multi-dimensional data onto lower dimensions developed and tested successfully in the context of creation of text maps based on their content. Current solutions are either based on computationally expensive dimension reduction with no proper guarantee of the outcome or on faster techniques that need some sort of post-processing for recovering information lost during the process. LSP is based on least square approximation, a technique originally employed for surface modeling and reconstruction. Least square approximations are capable of computing the coordinates of a set of projected points based on a reduced number of control points with defined geometry. We extend the concept for general data sets. In order to perform the projection, a small number of distance calculations is necessary and no repositioning of the final points is required to obtain a satisfactory precision of the final solution. Textual information is a typically difficult data type to handle, due to its intrinsic dimensionality. We employ document corpora as a benchmark to demonstrate the capabilities of the LSP to group and separate documents by their content with high precision.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper introduces Least Square Projection (LSP), a fast technique for projection of multi-dimensional data onto lower dimensions developed and tested successfully in the context of creation of text maps based on their content. 
Current solutions are either based on computationally expensive dimension reduction with no proper guarantee of the outcome or on faster techniques that need some sort of post-processing for recovering information lost during the process. LSP is based on least square approximation, a technique originally employed for surface modeling and reconstruction. Least square approximations are capable of computing the coordinates of a set of projected points based on a reduced number of control points with defined geometry. We extend the concept for general data sets. In order to perform the projection, a small number of distance calculations is necessary and no repositioning of the final points is required to obtain a satisfactory precision of the final solution. Textual information is a typically difficult data type to handle, due to its intrinsic dimensionality. We employ document corpora as a benchmark to demonstrate the capabilities of the LSP to group and separate documents by their content with high precision.", "fno": "26020282", "keywords": [], "authors": [ { "affiliation": "University of Sao Paulo, Sao Carlos, SP, Brazil", "fullName": "Fernando Vieira Paulovich", "givenName": "Fernando", "surname": "Vieira Paulovich", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Sao Paulo, Sao Carlos, SP, Brazil", "fullName": "Luis Gustavo Nonato", "givenName": "Luis Gustavo", "surname": "Nonato", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Sao Paulo, Sao Carlos, SP, Brazil", "fullName": "Rosane Minghim", "givenName": "Rosane", "surname": "Minghim", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-07-01T00:00:00", "pubType": "proceedings", "pages": "282-290", "year": "2006", "issn": "1550-6037", "isbn": "0-7695-2602-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "26020276", "articleId": "12OmNrHSCZO", "__typename": "AdjacentArticleType" }, "next": { "fno": "26020291", "articleId": "12OmNrYlmH0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2008/2174/0/04761410", "title": "Robust and precise eye detection based on locally selective projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761410/12OmNCdk2uA", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/3/212830083", "title": "Objects Velocity Estimation on Images Sequences by Hough Transform with Projection (HTP)", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212830083/12OmNCfSqRf", "parentPublication": { "id": "proceedings/icpr/2004/2128/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvd/1994/4990/0/00282675", "title": "Rapid technology projection for high-level synthesis", "doi": null, "abstractUrl": "/proceedings-article/icvd/1994/00282675/12OmNqG0SMV", "parentPublication": { "id": "proceedings/icvd/1994/4990/0", "title": "Proceedings of 7th International Conference on VLSI Design", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/1/212810346", "title": "Enhancing Low-Resolution Facial Images Using Error Back-Projection for Human Identification at a Distance", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212810346/12OmNvk7K2B", "parentPublication": { "id": "proceedings/icpr/2004/2128/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icme/2003/7965/1/7965833", "title": "Affine projection algorithm for oversampled subband adaptive filters", "doi": null, "abstractUrl": "/proceedings-article/icme/2003/7965833/12OmNxYL5cD", "parentPublication": { "id": "proceedings/icme/2003/7965/1", "title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2014/2874/0/2874a209", "title": "Multidimensional Projection with Radial Basis Function and Control Points Selection", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2014/2874a209/12OmNy5hRfG", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": "2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2008/3508/1/3508a528", "title": "An Improved Alternating-Projection Demosaicing Algorithm", "doi": null, "abstractUrl": "/proceedings-article/cis/2008/3508a528/12OmNybfraN", "parentPublication": { "id": "proceedings/cis/2008/3508/1", "title": "2008 International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1993/0946/4/00319665", "title": "Localized subspace projection", "doi": null, "abstractUrl": "/proceedings-article/icassp/1993/00319665/12OmNzmLxOH", "parentPublication": { "id": "proceedings/icassp/1993/0946/4", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030564", "title": "Least Square Projection: A Fast High-Precision Multidimensional Projection Technique and Its Application to Document Mapping", "doi": null, "abstractUrl": 
"/journal/tg/2008/03/ttg2008030564/13rRUzphDxT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a164", "title": "ElaMorph Projection: Deformation of 3D Shape by Dynamic Projection Mapping", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a164/1pysuGClQ9a", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNC1GueH", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNCdTeQB", "doi": "", "title": "Sparse feature fidelity for image quality assessment", "normalizedTitle": "Sparse feature fidelity for image quality assessment", "abstract": "A quality metric called sparse feature fidelity (SFF) is proposed for full-reference image quality assessment (IQA). It is inspired by the fact that images are transformed into sparse representations by the primary visual cortex which is the most important part of the human visual system (HVS). The proposed method is based on sparse features that are acquired from a set of feature detectors called simple cell matrix which is trained on samples of natural images by a sparse coding algorithm. Then the SFF scores are obtained by a similarity measurement between the features of reference and distorted images. Moreover, two strategies are designed to simulate the properties of the visual perception: visual attention and visual threshold. Experimental results on four image databases show that SFF is more consistent with the subjective evaluations than the leading IQA methods.", "abstracts": [ { "abstractType": "Regular", "content": "A quality metric called sparse feature fidelity (SFF) is proposed for full-reference image quality assessment (IQA). It is inspired by the fact that images are transformed into sparse representations by the primary visual cortex which is the most important part of the human visual system (HVS). The proposed method is based on sparse features that are acquired from a set of feature detectors called simple cell matrix which is trained on samples of natural images by a sparse coding algorithm. 
Then the SFF scores are obtained by a similarity measurement between the features of reference and distorted images. Moreover, two strategies are designed to simulate the properties of the visual perception: visual attention and visual threshold. Experimental results on four image databases show that SFF is more consistent with the subjective evaluations than the leading IQA methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A quality metric called sparse feature fidelity (SFF) is proposed for full-reference image quality assessment (IQA). It is inspired by the fact that images are transformed into sparse representations by the primary visual cortex which is the most important part of the human visual system (HVS). The proposed method is based on sparse features that are acquired from a set of feature detectors called simple cell matrix which is trained on samples of natural images by a sparse coding algorithm. Then the SFF scores are obtained by a similarity measurement between the features of reference and distorted images. Moreover, two strategies are designed to simulate the properties of the visual perception: visual attention and visual threshold. 
Experimental results on four image databases show that SFF is more consistent with the subjective evaluations than the leading IQA methods.", "fno": "06460456", "keywords": [ "Computer Vision", "Feature Extraction", "Image Coding", "Image Representation", "Sparse Matrices", "Visual Databases", "Visual Perception", "Sparse Feature Fidelity", "Quality Metric", "Image Quality Assessment", "Images Transformation", "Sparse Representation", "Visual Cortex", "Human Visual System", "HVS", "Sparse Feature Acquisition", "Feature Detector", "Simple Cell Matrix", "Natural Image", "Sparse Coding Algorithm", "SFF Scores", "Features Similarity Measurement", "Image Distortion", "Visual Perception", "Visual Attention", "Visual Threshold", "Image Database", "IQA", "Vectors", "Visualization", "Sparse Matrices", "Image Quality", "Feature Extraction", "PSNR", "Indexes" ], "authors": [ { "affiliation": "College of Computer Science, Sichuan University, Chengdu, China", "fullName": "Hua-wen Chang", "givenName": "Hua-wen", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Computer Science, Sichuan University, Chengdu, China", "fullName": "Ming-hui Wang", "givenName": "Ming-hui", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Computer Science, Sichuan University, Chengdu, China", "fullName": "Shu-qing Chen", "givenName": "Shu-qing", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Computer Science, Sichuan University, Chengdu, China", "fullName": "Hua Yang", "givenName": "Hua", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Chengdu Aeronautic Vocational and Technical College, Chengdu, China", "fullName": "Zu-jian Huang", "givenName": "Zu-jian", "surname": "Huang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": 
"proceedings", "pages": "1619-1622", "year": "2012", "issn": "1051-4651", "isbn": "978-1-4673-2216-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06460455", "articleId": "12OmNvjyxDq", "__typename": "AdjacentArticleType" }, "next": { "fno": "06460457", "articleId": "12OmNx3q71K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/itnac/2017/6796/0/08215389", "title": "PA VIF: A passive aggressive visual information fidelity for full reference image quality assessment", "doi": null, "abstractUrl": "/proceedings-article/itnac/2017/08215389/12OmNAnuTz4", "parentPublication": { "id": "proceedings/itnac/2017/6796/0", "title": "2017 27th International Telecommunication Networks and Applications Conference (ITNAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607462", "title": "No-reference image quality assessment metric by combining free energy theory and structural degradation model", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607462/12OmNvjgWBa", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607597", "title": "Hierarchical sparse coding based on spatial pooling and multi-feature fusion", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607597/12OmNyUFfRy", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2013/5050/0/5050a281", "title": "SAR Image Quality Assessment Based on SSIM Using Textural Feature", "doi": 
null, "abstractUrl": "/proceedings-article/icig/2013/5050a281/12OmNyqRn5H", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761848", "title": "Image quality assessment with visual attention", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761848/12OmNz4SOz3", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/145P1C37", "title": "Sparse representation for blind image quality assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/145P1C37/12OmNzsrwqo", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2015/11/06954570", "title": "Sparse Matrix Multiplication On An Associative Processor", "doi": null, "abstractUrl": "/journal/td/2015/11/06954570/13rRUxlgy3l", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956093", "title": "Intensity-Sensitive Similarity Indexes for Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956093/1IHpqmc8Peg", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a861", "title": "Multi-Level Feature 
Aggregation Network for Full-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a861/1MrFYFNlVq8", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2020/4272/0/427200a348", "title": "Light Field Image Quality Assessment: An Overview", "doi": null, "abstractUrl": "/proceedings-article/mipr/2020/427200a348/1mAa1RSFRa8", "parentPublication": { "id": "proceedings/mipr/2020/4272/0", "title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBNM8Mg", "title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "acronym": "sitis", "groupId": "1002425", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNqJHFr8", "doi": "10.1109/SITIS.2015.129", "title": "Reduced Reference 3D Mesh Quality Assessment Based on Statistical Models", "normalizedTitle": "Reduced Reference 3D Mesh Quality Assessment Based on Statistical Models", "abstract": "During their geometry processing and transmission 3D meshes are subject to various visual processing operations like compression, watermarking, remeshing, noise addition and so forth. In this context it is indispensable to evaluate the quality of the distorted mesh, we talk here about the mesh visual quality (MVQ) assessment. Several works have tried to evaluate the MVQ using simple geometric measures, However this metrics do not correlate well with the subjective score since they fail to reflect the perceived quality. In this paper we propose a new objective metric to evaluate the visual quality between a mesh with a perfect quality called reference mesh and its distorted version. The proposed metric uses a chosen statistical distribution to extract parameters of two random variable sets, the first set is the dihedral angles related to the reference mesh, while the second set is the dihedral angles related to the distorted mesh. The perceptual distance between two meshes is computed as the Kullback-Leibler divergence between the two sets of variables. 
Experimental results from two subjective databases (LIRIS masking database and LIRIS/EPFL general purpose database) and comparisons with seven objective metrics cited in the state-of-the-art demonstrate the efficacy of the proposed metric in terms of the correlation to the mean opinion scores across these databases.", "abstracts": [ { "abstractType": "Regular", "content": "During their geometry processing and transmission 3D meshes are subject to various visual processing operations like compression, watermarking, remeshing, noise addition and so forth. In this context it is indispensable to evaluate the quality of the distorted mesh, we talk here about the mesh visual quality (MVQ) assessment. Several works have tried to evaluate the MVQ using simple geometric measures, However this metrics do not correlate well with the subjective score since they fail to reflect the perceived quality. In this paper we propose a new objective metric to evaluate the visual quality between a mesh with a perfect quality called reference mesh and its distorted version. The proposed metric uses a chosen statistical distribution to extract parameters of two random variable sets, the first set is the dihedral angles related to the reference mesh, while the second set is the dihedral angles related to the distorted mesh. The perceptual distance between two meshes is computed as the Kullback-Leibler divergence between the two sets of variables. 
Experimental results from two subjective databases (LIRIS masking database and LIRIS/EPFL general purpose database) and comparisons with seven objective metrics cited in the state-of-the-art demonstrate the efficacy of the proposed metric in terms of the correlation to the mean opinion scores across these databases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "During their geometry processing and transmission 3D meshes are subject to various visual processing operations like compression, watermarking, remeshing, noise addition and so forth. In this context it is indispensable to evaluate the quality of the distorted mesh, we talk here about the mesh visual quality (MVQ) assessment. Several works have tried to evaluate the MVQ using simple geometric measures, However this metrics do not correlate well with the subjective score since they fail to reflect the perceived quality. In this paper we propose a new objective metric to evaluate the visual quality between a mesh with a perfect quality called reference mesh and its distorted version. The proposed metric uses a chosen statistical distribution to extract parameters of two random variable sets, the first set is the dihedral angles related to the reference mesh, while the second set is the dihedral angles related to the distorted mesh. The perceptual distance between two meshes is computed as the Kullback-Leibler divergence between the two sets of variables. 
Experimental results from two subjective databases (LIRIS masking database and LIRIS/EPFL general purpose database) and comparisons with seven objective metrics cited in the state-of-the-art demonstrate the efficacy of the proposed metric in terms of the correlation to the mean opinion scores across these databases.", "fno": "9721a170", "keywords": [ "Visualization", "Three Dimensional Displays", "Databases", "Distortion", "Distortion Measurement", "Solid Modeling", "Kullback Leibler Divergence", "Quality Assessment", "Objective Metric", "3 D Triangle Mesh", "Human Visual System", "Statistical Modeling", "Gamma Distribution", "Weibull Distribution" ], "authors": [ { "affiliation": null, "fullName": "Ilyass Abouelaziz", "givenName": "Ilyass", "surname": "Abouelaziz", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mounir Omari", "givenName": "Mounir", "surname": "Omari", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mohammed El Hassouni", "givenName": "Mohammed El", "surname": "Hassouni", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hocine Cherifi", "givenName": "Hocine", "surname": "Cherifi", "__typename": "ArticleAuthorType" } ], "idPrefix": "sitis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-11-01T00:00:00", "pubType": "proceedings", "pages": "170-177", "year": "2015", "issn": null, "isbn": "978-1-4673-9721-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9721a164", "articleId": "12OmNy4IF1r", "__typename": "AdjacentArticleType" }, "next": { "fno": "9721a178", "articleId": "12OmNyen1uj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmip/2017/5954/0/5954a123", "title": "No-Reference Stereoscopic Image Quality Assessment Using Natural Scene Statistics", "doi": 
null, "abstractUrl": "/proceedings-article/icmip/2017/5954a123/12OmNAndiqC", "parentPublication": { "id": "proceedings/icmip/2017/5954/0", "title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2016/5698/0/07907557", "title": "A Curvature Based Method for Blind Mesh Visual Quality Assessment Using a General Regression Neural Network", "doi": null, "abstractUrl": "/proceedings-article/sitis/2016/07907557/12OmNBU1jKQ", "parentPublication": { "id": "proceedings/sitis/2016/5698/0", "title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890209", "title": "An improved vertex-clustering-based progressive mesh encoder", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890209/12OmNrnJ6Rf", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2017/4283/0/4283a283", "title": "Mesh Visual Quality Assessment Metrics: A Comparison Study", "doi": null, "abstractUrl": "/proceedings-article/sitis/2017/4283a283/12OmNvsm6vq", "parentPublication": { "id": "proceedings/sitis/2017/4283/0", "title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890271", "title": "Automatic mesh animation preview", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890271/12OmNxWLTzX", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/11/07352354", "title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces", "doi": null, "abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2018/9385/0/938500a617", "title": "Reduced Reference Mesh Visual Quality Assessment Based on Convolutional Neural Network", "doi": null, "abstractUrl": "/proceedings-article/sitis/2018/938500a617/19RSr15RlyE", "parentPublication": { "id": "proceedings/sitis/2018/9385/0", "title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j784", "title": "Mesh R-CNN", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j784/1hVlAhHbyIo", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2019/5686/0/568600a693", "title": "Full Reference Mesh Visual Quality Assessment Using Pre-Trained Deep Network and Quality Indices", "doi": null, "abstractUrl": "/proceedings-article/sitis/2019/568600a693/1j9xD4pwAJW", "parentPublication": { "id": "proceedings/sitis/2019/5686/0", "title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2021/4989/0/09455963", "title": "A No-Reference Visual Quality Metric For 3D Color Meshes", "doi": null, "abstractUrl": 
"/proceedings-article/icmew/2021/09455963/1uCgrVwWrbq", "parentPublication": { "id": "proceedings/icmew/2021/4989/0", "title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBp52xK", "title": "2009 Digest of Technical Papers International Conference on Consumer Electronics", "acronym": "icce", "groupId": "1000163", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNyo1nTv", "doi": "10.1109/ICCE.2009.5012328", "title": "Structural information-based image quality assessment using LU factorization", "normalizedTitle": "Structural information-based image quality assessment using LU factorization", "abstract": "The goal of the objective image quality assessment is to quantitatively measure the image quality of a distorted image, which is close to the subjective image quality assessment such as the mean opinion score. The image quality assessment algorithms are generally classified into two methodologies: perceptual and structural. This paper proposes a structural information-based image quality assessment algorithm, in which LU factorization is used for representation of the structural information of an image. The proposed algorithm performs LU factorization of reference and distorted images, from which the distortion map is computed for measuring the quality of the distorted image. Finally, the proposed image quality metric is computed from the two-dimensional distortion map. Experimental results with the LIVE database images show the efficiency of the proposed method. In commercial systems, the proposed algorithm can be used for quality assessment of mobile contents and video coding, which effectively replaces the peak signal to noise ratio or the mean square error.", "abstracts": [ { "abstractType": "Regular", "content": "The goal of the objective image quality assessment is to quantitatively measure the image quality of a distorted image, which is close to the subjective image quality assessment such as the mean opinion score. The image quality assessment algorithms are generally classified into two methodologies: perceptual and structural. 
This paper proposes a structural information-based image quality assessment algorithm, in which LU factorization is used for representation of the structural information of an image. The proposed algorithm performs LU factorization of reference and distorted images, from which the distortion map is computed for measuring the quality of the distorted image. Finally, the proposed image quality metric is computed from the two-dimensional distortion map. Experimental results with the LIVE database images show the efficiency of the proposed method. In commercial systems, the proposed algorithm can be used for quality assessment of mobile contents and video coding, which effectively replaces the peak signal to noise ratio or the mean square error.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The goal of the objective image quality assessment is to quantitatively measure the image quality of a distorted image, which is close to the subjective image quality assessment such as the mean opinion score. The image quality assessment algorithms are generally classified into two methodologies: perceptual and structural. This paper proposes a structural information-based image quality assessment algorithm, in which LU factorization is used for representation of the structural information of an image. The proposed algorithm performs LU factorization of reference and distorted images, from which the distortion map is computed for measuring the quality of the distorted image. Finally, the proposed image quality metric is computed from the two-dimensional distortion map. Experimental results with the LIVE database images show the efficiency of the proposed method. 
In commercial systems, the proposed algorithm can be used for quality assessment of mobile contents and video coding, which effectively replaces the peak signal to noise ratio or the mean square error.", "fno": "05012328", "keywords": [], "authors": [ { "affiliation": "Department of Electronic Engineering, Sogang Univ., C.P.O. Box 1142, Seoul 100-611, Korea", "fullName": "Ho-Sung Han", "givenName": null, "surname": "Ho-Sung Han", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronic Engineering, Sogang Univ., C.P.O. Box 1142, Seoul 100-611, Korea", "fullName": "Dong-O Kim", "givenName": null, "surname": "Dong-O Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronic Engineering, Sogang Univ., C.P.O. Box 1142, Seoul 100-611, Korea", "fullName": "Rae-Hong Park", "givenName": "Rae-Hong", "surname": "Park", "__typename": "ArticleAuthorType" } ], "idPrefix": "icce", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-01-01T00:00:00", "pubType": "proceedings", "pages": "1-2", "year": "2009", "issn": null, "isbn": "978-1-4244-2558-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05012327", "articleId": "12OmNBqdrgP", "__typename": "AdjacentArticleType" }, "next": { "fno": "05012329", "articleId": "12OmNCd2rpe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/picict/2013/4984/0/4984a032", "title": "A Pixel-Based Framework for Blind Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/picict/2013/4984a032/12OmNA1Vnv8", "parentPublication": { "id": "proceedings/picict/2013/4984/0", "title": "Palestinian International Conference on Information and Communication Technology (PICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icmip/2017/5954/0/5954a123", "title": "No-Reference Stereoscopic Image Quality Assessment Using Natural Scene Statistics", "doi": null, "abstractUrl": "/proceedings-article/icmip/2017/5954a123/12OmNAndiqC", "parentPublication": { "id": "proceedings/icmip/2017/5954/0", "title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2016/7258/0/07552955", "title": "Blind quality assessment of compressed images via pseudo structural similarity", "doi": null, "abstractUrl": "/proceedings-article/icme/2016/07552955/12OmNwvVrMl", "parentPublication": { "id": "proceedings/icme/2016/7258/0", "title": "2016 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2016/5510/0/07881422", "title": "Classification of Image Distortions for Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/csci/2016/07881422/12OmNx3q6YT", "parentPublication": { "id": "proceedings/csci/2016/5510/0", "title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g373", "title": "Blind Predicting Similar Quality Map for Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g373/17D45VObpPA", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500a376", "title": "No-Reference Image Quality Assessment: An Attention Driven Approach", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2019/197500a376/18j8KtV3XrO", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b799", "title": "Focused Feature Differentiation Network for Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b799/1G56BGdaJwc", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b200", "title": "Image Quality Assessment with Gradient Siamese Network", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b200/1G56qFWRBNS", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09106040", "title": "Quality Difference Ranking Model For Smartphone Camera Photo Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106040/1kwqBu07j5S", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a108", "title": "Generative Difference Image for Blind Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a108/1xqyS053BIc", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial 
Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45VObpPA", "doi": "10.1109/CVPR.2018.00667", "title": "Blind Predicting Similar Quality Map for Image Quality Assessment", "normalizedTitle": "Blind Predicting Similar Quality Map for Image Quality Assessment", "abstract": "A key problem in blind image quality assessment (BIQA) is how to effectively model the properties of human visual system in a data-driven manner. In this paper, we propose a simple and efficient BIQA model based on a novel framework which consists of a fully convolutional neural network (FCNN) and a pooling network to solve this problem. In principle, FCNN is capable of predicting a pixel-by-pixel similar quality map only from a distorted image by using the intermediate similarity maps derived from conventional full-reference image quality assessment methods. The predicted pixel-by-pixel quality maps have good consistency with the distortion correlations between the reference and distorted images. Finally, a deep pooling network regresses the quality map into a score. Experiments have demonstrated that our predictions outperform many state-of-the-art BIQA methods.", "abstracts": [ { "abstractType": "Regular", "content": "A key problem in blind image quality assessment (BIQA) is how to effectively model the properties of human visual system in a data-driven manner. In this paper, we propose a simple and efficient BIQA model based on a novel framework which consists of a fully convolutional neural network (FCNN) and a pooling network to solve this problem. 
In principle, FCNN is capable of predicting a pixel-by-pixel similar quality map only from a distorted image by using the intermediate similarity maps derived from conventional full-reference image quality assessment methods. The predicted pixel-by-pixel quality maps have good consistency with the distortion correlations between the reference and distorted images. Finally, a deep pooling network regresses the quality map into a score. Experiments have demonstrated that our predictions outperform many state-of-the-art BIQA methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A key problem in blind image quality assessment (BIQA) is how to effectively model the properties of human visual system in a data-driven manner. In this paper, we propose a simple and efficient BIQA model based on a novel framework which consists of a fully convolutional neural network (FCNN) and a pooling network to solve this problem. In principle, FCNN is capable of predicting a pixel-by-pixel similar quality map only from a distorted image by using the intermediate similarity maps derived from conventional full-reference image quality assessment methods. The predicted pixel-by-pixel quality maps have good consistency with the distortion correlations between the reference and distorted images. Finally, a deep pooling network regresses the quality map into a score. 
Experiments have demonstrated that our predictions outperform many state-of-the-art BIQA methods.", "fno": "642000g373", "keywords": [ "Distortion", "Image Processing", "Learning Artificial Intelligence", "Neural Nets", "BIQA Model", "Image Distortion", "Full Reference Image Quality Assessment Methods", "Intermediate Similarity Maps", "Pixel By Pixel Similar Quality Map", "FCNN", "Fully Convolutional Neural Network", "Human Visual System", "Blind Image Quality Assessment", "Deep Pooling Network", "Distortion Correlations", "Image Quality", "Distortion", "Indexes", "Predictive Models", "Feature Extraction", "Degradation", "Convolutional Neural Networks" ], "authors": [ { "affiliation": null, "fullName": "Da Pan", "givenName": "Da", "surname": "Pan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ping Shi", "givenName": "Ping", "surname": "Shi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ming Hou", "givenName": "Ming", "surname": "Hou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zefeng Ying", "givenName": "Zefeng", "surname": "Ying", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sizhe Fu", "givenName": "Sizhe", "surname": "Fu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yuan Zhang", "givenName": "Yuan", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "6373-6382", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000g363", "articleId": "17D45VObpQ7", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000g383", "articleId": "17D45VsBTVv", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2017/1034/0/1034a510", "title": "Hierarchical Feature Degradation Based Blind Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a510/12OmNA0MZ5p", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bracis/2017/2407/0/2407a252", "title": "Blind Image Quality Assessment Using Local Variant Patterns", "doi": null, "abstractUrl": "/proceedings-article/bracis/2017/2407a252/12OmNrJAeix", "parentPublication": { "id": "proceedings/bracis/2017/2407/0", "title": "2017 Brazilian Conference on Intelligent Systems (BRACIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/06909936", "title": "Beyond Human Opinion Scores: Blind Image Quality Assessment Based on Synthetic Scores", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/06909936/12OmNvAiSjn", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/145P1C37", "title": "Sparse representation for blind image quality assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/145P1C37/12OmNzsrwqo", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2018/5321/0/08499448", "title": "Joint Entropy Degradation Based Blind Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2018/08499448/17D45WZZ7Fr", 
"parentPublication": { "id": "proceedings/bigmm/2018/5321/0", "title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/03/09784904", "title": "Continual Learning for Blind Image Quality Assessment", "doi": null, "abstractUrl": "/journal/tp/2023/03/09784904/1DQLYnuTpAs", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859608", "title": "Deep Blind Image Quality Assessment Using Dual-Order Statistics", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859608/1G9DVKufHos", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09860021", "title": "Cycleiqa: Blind Image Quality Assessment Via Cycle-Consistent Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09860021/1G9EIMPLUqs", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d664", "title": "Blindly Assess Image Quality in the Wild Guided by a Self-Adaptive Hyper Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d664/1m3nyfkmtws", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09399249", "title": "Active Fine-Tuning From gMAD 
Examples Improves Blind Image Quality Assessment", "doi": null, "abstractUrl": "/journal/tp/2022/09/09399249/1sDoIm3fFPa", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "18j8Ecq0jn2", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "18j8KtV3XrO", "doi": "10.1109/WACV.2019.00046", "title": "No-Reference Image Quality Assessment: An Attention Driven Approach", "normalizedTitle": "No-Reference Image Quality Assessment: An Attention Driven Approach", "abstract": "In this paper, we tackle no-reference image quality assessment (NR-IQA), which aims to predict the perceptual quality of a test image without referencing its pristine-quality counterpart. The free-energy brain theory implies that the human visual system (HVS) tends to predict the pristine image while perceiving a distorted one. Besides, image quality assessment heavily depends on the way how human beings attend to distorted images. Motivated by that, the distorted image is restored first. Then given the distorted-restored pair, we make the first attempt to formulate the NR-IQA as a dynamic attentional process and implement it via reinforcement learning. The reward is derived from two tasks-classifying the distortion type and predicting the perceptual score of a test image. The model learns a policy to sample a sequence of fixation areas with a goal to maximize the expectation of the accumulated rewards. The observations of the fixation areas are aggregated through a recurrent neural network (RNN) and the robust averaging strategy which assigns different weights on different fixation areas. Extensive experiments on TID2008, TID2013 and CSIQ demonstrate the superiority of our method.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we tackle no-reference image quality assessment (NR-IQA), which aims to predict the perceptual quality of a test image without referencing its pristine-quality counterpart. 
The free-energy brain theory implies that the human visual system (HVS) tends to predict the pristine image while perceiving a distorted one. Besides, image quality assessment heavily depends on the way how human beings attend to distorted images. Motivated by that, the distorted image is restored first. Then given the distorted-restored pair, we make the first attempt to formulate the NR-IQA as a dynamic attentional process and implement it via reinforcement learning. The reward is derived from two tasks-classifying the distortion type and predicting the perceptual score of a test image. The model learns a policy to sample a sequence of fixation areas with a goal to maximize the expectation of the accumulated rewards. The observations of the fixation areas are aggregated through a recurrent neural network (RNN) and the robust averaging strategy which assigns different weights on different fixation areas. Extensive experiments on TID2008, TID2013 and CSIQ demonstrate the superiority of our method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we tackle no-reference image quality assessment (NR-IQA), which aims to predict the perceptual quality of a test image without referencing its pristine-quality counterpart. The free-energy brain theory implies that the human visual system (HVS) tends to predict the pristine image while perceiving a distorted one. Besides, image quality assessment heavily depends on the way how human beings attend to distorted images. Motivated by that, the distorted image is restored first. Then given the distorted-restored pair, we make the first attempt to formulate the NR-IQA as a dynamic attentional process and implement it via reinforcement learning. The reward is derived from two tasks-classifying the distortion type and predicting the perceptual score of a test image. The model learns a policy to sample a sequence of fixation areas with a goal to maximize the expectation of the accumulated rewards. 
The observations of the fixation areas are aggregated through a recurrent neural network (RNN) and the robust averaging strategy which assigns different weights on different fixation areas. Extensive experiments on TID2008, TID2013 and CSIQ demonstrate the superiority of our method.", "fno": "197500a376", "keywords": [ "Image Restoration", "Learning Artificial Intelligence", "Recurrent Neural Nets", "Human Beings", "Distorted Image", "Distorted Restored Pair", "NR IQA", "Dynamic Attentional Process", "Attention Driven Approach", "No Reference Image Quality Assessment", "Perceptual Quality", "Pristine Quality Counterpart", "Free Energy Brain Theory", "Human Visual System", "Pristine Image", "Reinforcement Learning", "Perceptual Score", "Fixation Area Sequence", "Recurrent Neural Network", "Image Restoration", "Image Quality", "Task Analysis", "Feature Extraction", "Distortion", "Computational Modeling", "Recurrent Neural Networks" ], "authors": [ { "affiliation": null, "fullName": "Diqi Chen", "givenName": "Diqi", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yizhou Wang", "givenName": "Yizhou", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hongyu Ren", "givenName": "Hongyu", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wen Gao", "givenName": "Wen", "surname": "Gao", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-01-01T00:00:00", "pubType": "proceedings", "pages": "376-385", "year": "2019", "issn": "1550-5790", "isbn": "978-1-7281-1975-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "197500a367", "articleId": "18j8NOvlRIY", "__typename": "AdjacentArticleType" }, "next": { "fno": "197500a386", "articleId": "18j8JHBz3MY", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032b040", "title": "RankIQA: Learning from Rankings for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b040/12OmNAkEU63", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019508", "title": "An accurate deep convolutional neural networks model for no-reference image quality assessment", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019508/12OmNvAiSBe", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a732", "title": "Hallucinated-IQA: No-Reference Image Quality Assessment via Adversarial Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a732/17D45W9KVJy", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ai/2023/01/09695198", "title": "No-Reference Image Quality Assessment via Multibranch Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/ai/2023/01/09695198/1AvqPnqu9Gw", "parentPublication": { "id": "trans/ai", "title": "IEEE Transactions on Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500d989", "title": "No-Reference Image Quality Assessment via Transformers, Relative Ranking, and Self-Consistency", "doi": null, 
"abstractUrl": "/proceedings-article/wacv/2022/091500d989/1B1417PUbkI", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200k0222", "title": "Learning Conditional Knowledge Distillation for Degraded-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200k0222/1BmEDOGOsRG", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b190", "title": "MANIQA: Multi-dimension Attention Network for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b190/1G56mBh1R5e", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600c458", "title": "No Reference Opinion Unaware Quality Assessment of Authentically Distorted Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600c458/1KxVrQpNTXi", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2023/2056/0/205600a538", "title": "Image Quality Assessment using Semi-Supervised Representation Learning", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2023/205600a538/1Kzz1FkXejC", "parentPublication": { "id": "proceedings/wacvw/2023/2056/0", "title": "2023 IEEE/CVF Winter 
Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102895", "title": "Active Inference of GAN for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102895/1kwr8uBp7Uc", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G55WEFExd6", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G56BGdaJwc", "doi": "10.1109/CVPRW56347.2022.00195", "title": "Focused Feature Differentiation Network for Image Quality Assessment", "normalizedTitle": "Focused Feature Differentiation Network for Image Quality Assessment", "abstract": "Image quality assessment (IQA) intended to assess the perceptual quality of images has been an essential problem in both human and machine vision. Recently, with the help of deep neural network (DNN), IQA algorithms can extract more valuable differences between the distorted and reference images than the traditional algorithms, and thus the performance of DNN-based algorithms is more satisfactory than that of previous algorithms. However, the accuracy for different distorted images preference rating of the existing DNN-based quality assessment methods will be decreased when multiple distorted images are quite similar to each other or to the reference image. To tackle this problem, we propose a focused feature differentiation network (FFDN) to highlight the feature maps with greater distorted and reference differentiation. Furthermore, we use the multi-scale feature fusion module to fuse the focused differentiation features at different scale receptive fields. To further improve the accuracy of our method, we predict the mean opinion score and differentiation score by stages and combine them with different self-learning weights. Finally, we convert the weighted score into different image preference degrees. 
Experimental results on the validation dataset of CLIC2022 and test dataset of CLIC2021 show that the accuracy of our model FFDN is higher than other excellent quality assessment methods.", "abstracts": [ { "abstractType": "Regular", "content": "Image quality assessment (IQA) intended to assess the perceptual quality of images has been an essential problem in both human and machine vision. Recently, with the help of deep neural network (DNN), IQA algorithms can extract more valuable differences between the distorted and reference images than the traditional algorithms, and thus the performance of DNN-based algorithms is more satisfactory than that of previous algorithms. However, the accuracy for different distorted images preference rating of the existing DNN-based quality assessment methods will be decreased when multiple distorted images are quite similar to each other or to the reference image. To tackle this problem, we propose a focused feature differentiation network (FFDN) to highlight the feature maps with greater distorted and reference differentiation. Furthermore, we use the multi-scale feature fusion module to fuse the focused differentiation features at different scale receptive fields. To further improve the accuracy of our method, we predict the mean opinion score and differentiation score by stages and combine them with different self-learning weights. Finally, we convert the weighted score into different image preference degrees. Experimental results on the validation dataset of CLIC2022 and test dataset of CLIC2021 show that the accuracy of our model FFDN is higher than other excellent quality assessment methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Image quality assessment (IQA) intended to assess the perceptual quality of images has been an essential problem in both human and machine vision. 
Recently, with the help of deep neural network (DNN), IQA algorithms can extract more valuable differences between the distorted and reference images than the traditional algorithms, and thus the performance of DNN-based algorithms is more satisfactory than that of previous algorithms. However, the accuracy for different distorted images preference rating of the existing DNN-based quality assessment methods will be decreased when multiple distorted images are quite similar to each other or to the reference image. To tackle this problem, we propose a focused feature differentiation network (FFDN) to highlight the feature maps with greater distorted and reference differentiation. Furthermore, we use the multi-scale feature fusion module to fuse the focused differentiation features at different scale receptive fields. To further improve the accuracy of our method, we predict the mean opinion score and differentiation score by stages and combine them with different self-learning weights. Finally, we convert the weighted score into different image preference degrees. 
Experimental results on the validation dataset of CLIC2022 and test dataset of CLIC2021 show that the accuracy of our model FFDN is higher than other excellent quality assessment methods.", "fno": "873900b799", "keywords": [ "Computer Vision", "Deep Learning Artificial Intelligence", "Distortion", "Feature Extraction", "Learning Artificial Intelligence", "Neural Nets", "Multiscale Feature Fusion Module", "Focused Differentiation Features", "Different Scale Receptive Fields", "Mean Opinion Score", "Differentiation Score", "Focused Feature Differentiation Network", "Image Quality Assessment", "Human Machine Vision", "Deep Neural Network", "IQA Algorithms", "Valuable Differences", "Distorted Reference Images", "DNN Based Quality Assessment Methods", "Multiple Distorted Images", "Reference Image", "Feature Maps", "Distorted Reference Differentiation", "Distorted Image Preference Rating", "Image Preference Degrees", "Self Learning Weights", "Image Quality", "Deep Learning", "Fuses", "Convolution", "Machine Vision", "Conferences", "Neural Networks" ], "authors": [ { "affiliation": "Xidian University,Xi’an,China", "fullName": "Gang He", "givenName": "Gang", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "Xidian University,Xi’an,China", "fullName": "Yong Wang", "givenName": "Yong", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Xidian University,Xi’an,China", "fullName": "Li Xu", "givenName": "Li", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Xidian University,Xi’an,China", "fullName": "Wenli Zhang", "givenName": "Wenli", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Kuaishou Technology,Beijing,China", "fullName": "Ming Sun", "givenName": "Ming", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Kuaishou Technology,Beijing,China", "fullName": "Xing Wen", "givenName": "Xing", "surname": "Wen", "__typename": "ArticleAuthorType" } ], "idPrefix": 
"cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "1799-1803", "year": "2022", "issn": null, "isbn": "978-1-6654-8739-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "873900b794", "articleId": "1G56qmkQ2zu", "__typename": "AdjacentArticleType" }, "next": { "fno": "873900b804", "articleId": "1G57ferfqww", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icisce/2017/3013/0/3013a234", "title": "Contrast Changed Image Quality Assessment Based on Dual-Path Feature-Difference Network", "doi": null, "abstractUrl": "/proceedings-article/icisce/2017/3013a234/12OmNAObbBJ", "parentPublication": { "id": "proceedings/icisce/2017/3013/0", "title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460456", "title": "Sparse feature fidelity for image quality assessment", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460456/12OmNCdTeQB", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2016/5510/0/07881422", "title": "Classification of Image Distortions for Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/csci/2016/07881422/12OmNx3q6YT", "parentPublication": { "id": "proceedings/csci/2016/5510/0", "title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2019/1975/0/197500a376", "title": "No-Reference Image Quality Assessment: An Attention Driven Approach", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a376/18j8KtV3XrO", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a939", "title": "Conformer and Blind Noisy Students for Improved Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a939/1G562YGgmze", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2023/2056/0/205600a538", "title": "Image Quality Assessment using Semi-Supervised Representation Learning", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2023/205600a538/1Kzz1FkXejC", "parentPublication": { "id": "proceedings/wacvw/2023/2056/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a861", "title": "Multi-Level Feature Aggregation Network for Full-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a861/1MrFYFNlVq8", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09399249", "title": "Active Fine-Tuning From gMAD Examples Improves Blind Image Quality Assessment", "doi": null, "abstractUrl": 
"/journal/tp/2022/09/09399249/1sDoIm3fFPa", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a433", "title": "Perceptual Image Quality Assessment with Transformers", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900a433/1yVzNTyE0WQ", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a443", "title": "IQMA Network: Image Quality Multi-scale Assessment Network", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900a443/1yXsEB71mg0", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G55WEFExd6", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G56qFWRBNS", "doi": "10.1109/CVPRW56347.2022.00127", "title": "Image Quality Assessment with Gradient Siamese Network", "normalizedTitle": "Image Quality Assessment with Gradient Siamese Network", "abstract": "In this work, we introduce Gradient Siamese Network (GSN) for image quality assessment. The proposed method is skilled in capturing the gradient features between distorted images and reference images in full-reference image quality assessment (IQA) task. We utilize Central Differential Convolution to obtain both semantic features and detail difference hidden in image pair. Furthermore, spatial attention guides the network to concentrate on regions related to image detail. For the low-level, mid-level, and high-level features extracted by the network, we innovatively design a multi-level fusion method to improve the efficiency of feature utilization. In addition to the common mean square error supervision, we further consider the relative distance among batch samples and successfully apply KL divergence loss to the image quality assessment task. We experimented the proposed algorithm GSN on several publicly available datasets and proved its superior performance. Our network won the second place in NTIRE 2022 Perceptual Image Quality Assessment Challenge track 1 Full-Reference [1].", "abstracts": [ { "abstractType": "Regular", "content": "In this work, we introduce Gradient Siamese Network (GSN) for image quality assessment. The proposed method is skilled in capturing the gradient features between distorted images and reference images in full-reference image quality assessment (IQA) task. 
We utilize Central Differential Convolution to obtain both semantic features and detail difference hidden in image pair. Furthermore, spatial attention guides the network to concentrate on regions related to image detail. For the low-level, mid-level, and high-level features extracted by the network, we innovatively design a multi-level fusion method to improve the efficiency of feature utilization. In addition to the common mean square error supervision, we further consider the relative distance among batch samples and successfully apply KL divergence loss to the image quality assessment task. We experimented the proposed algorithm GSN on several publicly available datasets and proved its superior performance. Our network won the second place in NTIRE 2022 Perceptual Image Quality Assessment Challenge track 1 Full-Reference [1].", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this work, we introduce Gradient Siamese Network (GSN) for image quality assessment. The proposed method is skilled in capturing the gradient features between distorted images and reference images in full-reference image quality assessment (IQA) task. We utilize Central Differential Convolution to obtain both semantic features and detail difference hidden in image pair. Furthermore, spatial attention guides the network to concentrate on regions related to image detail. For the low-level, mid-level, and high-level features extracted by the network, we innovatively design a multi-level fusion method to improve the efficiency of feature utilization. In addition to the common mean square error supervision, we further consider the relative distance among batch samples and successfully apply KL divergence loss to the image quality assessment task. We experimented the proposed algorithm GSN on several publicly available datasets and proved its superior performance. 
Our network won the second place in NTIRE 2022 Perceptual Image Quality Assessment Challenge track 1 Full-Reference [1].", "fno": "873900b200", "keywords": [ "Feature Extraction", "Image Capture", "Image Classification", "Image Fusion", "Image Sampling", "Mean Square Error Methods", "Full Reference Image Quality Assessment Task", "Multilevel Fusion Method", "Gradient Features", "Gradient Siamese Network", "GSN", "IQA", "Features Extraction", "Mean Square Error", "Image Quality", "Convolution", "Conferences", "Semantics", "Mean Square Error Methods", "Network Architecture", "Feature Extraction" ], "authors": [ { "affiliation": "Interactive Entertainment Group of Netease Inc,Guangzhou,China", "fullName": "Heng Cong", "givenName": "Heng", "surname": "Cong", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Entertainment Group of Netease Inc,Guangzhou,China", "fullName": "Lingzhi Fu", "givenName": "Lingzhi", "surname": "Fu", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Entertainment Group of Netease Inc,Guangzhou,China", "fullName": "Rongyu Zhang", "givenName": "Rongyu", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Entertainment Group of Netease Inc,Guangzhou,China", "fullName": "Yusheng Zhang", "givenName": "Yusheng", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Entertainment Group of Netease Inc,Guangzhou,China", "fullName": "Hao Wang", "givenName": "Hao", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Entertainment Group of Netease Inc,Guangzhou,China", "fullName": "Jiarong He", "givenName": "Jiarong", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Entertainment Group of Netease Inc,Guangzhou,China", "fullName": "Jin Gao", "givenName": "Jin", "surname": "Gao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": 
true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "1200-1209", "year": "2022", "issn": null, "isbn": "978-1-6654-8739-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "873900b190", "articleId": "1G56mBh1R5e", "__typename": "AdjacentArticleType" }, "next": { "fno": "873900b210", "articleId": "1G570aIkfyE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/picict/2013/4984/0/4984a032", "title": "A Pixel-Based Framework for Blind Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/picict/2013/4984a032/12OmNA1Vnv8", "parentPublication": { "id": "proceedings/picict/2013/4984/0", "title": "Palestinian International Conference on Information and Communication Technology (PICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2016/7258/0/07552936", "title": "Distortion recognition for image quality assessment with convolutional neural network", "doi": null, "abstractUrl": "/proceedings-article/icme/2016/07552936/12OmNAFWOQV", "parentPublication": { "id": "proceedings/icme/2016/7258/0", "title": "2016 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/1/3119a467", "title": "Blind Image Quality Assessment for Measuring Image Blur", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119a467/12OmNqJ8tbn", "parentPublication": { "id": "proceedings/cisp/2008/3119/1", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457b969", "title": "Deep Learning of Human Visual Sensitivity in Image Quality Assessment Framework", "doi": null, 
"abstractUrl": "/proceedings-article/cvpr/2017/0457b969/12OmNrkT7Im", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmip/2016/8940/0/8940a051", "title": "No-Reference Image Quality Assessment for Defocus Restoration", "doi": null, "abstractUrl": "/proceedings-article/icmip/2016/8940a051/12OmNwO5LVT", "parentPublication": { "id": "proceedings/icmip/2016/8940/0", "title": "2016 First International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a851", "title": "Synthesized Texture Quality Assessment via Multi-scale Spatial and Statistical Texture Attributes of Image and Gradient Magnitude Coefficients", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000a851/17D45VWpMys", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b799", "title": "Focused Feature Differentiation Network for Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b799/1G56BGdaJwc", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a861", "title": "Multi-Level Feature Aggregation Network for Full-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a861/1MrFYFNlVq8", "parentPublication": { "id": 
"proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b864", "title": "Encoding Distortions for Multi-task Full-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b864/1cdOIfA2oGA", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a433", "title": "Perceptual Image Quality Assessment with Transformers", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900a433/1yVzNTyE0WQ", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KzyWski0hO", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "acronym": "wacvw", "groupId": "10029377", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1Kzz1FkXejC", "doi": "10.1109/WACVW58289.2023.00060", "title": "Image Quality Assessment using Semi-Supervised Representation Learning", "normalizedTitle": "Image Quality Assessment using Semi-Supervised Representation Learning", "abstract": "In this paper, we propose a framework for learning feature representations for Image Quality Assessment (IQA) using contrastive learning. To account for the absence of large-scale IQA dataset, we pretrain an image encoder to cluster images based on the image quality using synthetically distorted versions of pristine unlabeled images. Images of similar quality are grouped closer in embedding space, while simultaneously pushing apart images of dissimilar quality. In addition we show that, augmenting the contrastive learning task with downstream aware joint supervision results in feature representations that are more suitable and easily transferable for IQA specific tasks. We study the effectiveness of the learnt representations in downstream task of image quality prediction and show that our model achieves superior performance on both synthetically and authentically distorted IQA datasets when compared to other deep feature-based IQA methods.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a framework for learning feature representations for Image Quality Assessment (IQA) using contrastive learning. To account for the absence of large-scale IQA dataset, we pretrain an image encoder to cluster images based on the image quality using synthetically distorted versions of pristine unlabeled images. 
Images of similar quality are grouped closer in embedding space, while simultaneously pushing apart images of dissimilar quality. In addition we show that, augmenting the contrastive learning task with downstream aware joint supervision results in feature representations that are more suitable and easily transferable for IQA specific tasks. We study the effectiveness of the learnt representations in downstream task of image quality prediction and show that our model achieves superior performance on both synthetically and authentically distorted IQA datasets when compared to other deep feature-based IQA methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a framework for learning feature representations for Image Quality Assessment (IQA) using contrastive learning. To account for the absence of large-scale IQA dataset, we pretrain an image encoder to cluster images based on the image quality using synthetically distorted versions of pristine unlabeled images. Images of similar quality are grouped closer in embedding space, while simultaneously pushing apart images of dissimilar quality. In addition we show that, augmenting the contrastive learning task with downstream aware joint supervision results in feature representations that are more suitable and easily transferable for IQA specific tasks. 
We study the effectiveness of the learnt representations in downstream task of image quality prediction and show that our model achieves superior performance on both synthetically and authentically distorted IQA datasets when compared to other deep feature-based IQA methods.", "fno": "205600a538", "keywords": [ "Deep Learning Artificial Intelligence", "Distortion", "Feature Extraction", "Image Classification", "Image Representation", "Learning Artificial Intelligence", "Pattern Clustering", "Regression Analysis", "Supervised Learning", "Unsupervised Learning", "Apart Images", "Authentically Distorted IQA Datasets", "Cluster Images", "Contrastive Learning Task", "Deep Feature Based IQA Methods", "Dissimilar Quality", "Downstream Aware Joint Supervision Results", "Feature Representations", "Image Encoder", "Image Quality Assessment", "Image Quality Prediction", "IQA Specific Tasks", "Large Scale IQA", "Learnt Representations", "Pristine Unlabeled Images", "Semisupervised Representation Learning", "Similar Quality", "Synthetically IQA Datasets", "Image Quality", "Representation Learning", "Computer Vision", "Conferences", "Computational Modeling", "Predictive Models", "Task Analysis" ], "authors": [ { "affiliation": "Amazon,Bangalore,India", "fullName": "Vishnu Prabhakaran", "givenName": "Vishnu", "surname": "Prabhakaran", "__typename": "ArticleAuthorType" }, { "affiliation": "Amazon,Seattle,USA", "fullName": "Gokul Swamy", "givenName": "Gokul", "surname": "Swamy", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-01-01T00:00:00", "pubType": "proceedings", "pages": "538-547", "year": "2023", "issn": null, "isbn": "979-8-3503-2056-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "205600a528", "articleId": "1Kzz0IxIvV6", "__typename": "AdjacentArticleType" }, "next": { "fno": 
"205600a548", "articleId": "1KzyY1CrDsQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032b040", "title": "RankIQA: Learning from Rankings for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b040/12OmNAkEU63", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2014/04/mmu2014040067", "title": "Training Quality-Aware Filters for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/magazine/mu/2014/04/mmu2014040067/13rRUNvyaht", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2022/5824/0/582400a093", "title": "Image Quality Assessment using Synthetic Images", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2022/582400a093/1B12AwCzr5m", "parentPublication": { "id": "proceedings/wacvw/2022/5824/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a939", "title": "Conformer and Blind Noisy Students for Improved Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a939/1G562YGgmze", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f841", "title": "Incorporating Semi-Supervised and Positive-Unlabeled Learning for Boosting Full Reference Image Quality 
Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f841/1H1m0HCHu3C", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600c458", "title": "No Reference Opinion Unaware Quality Assessment of Authentically Distorted Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600c458/1KxVrQpNTXi", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a861", "title": "Multi-Level Feature Aggregation Network for Full-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a861/1MrFYFNlVq8", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09106040", "title": "Quality Difference Ranking Model For Smartphone Camera Photo Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106040/1kwqBu07j5S", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102895", "title": "Active Inference of GAN for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102895/1kwr8uBp7Uc", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE 
International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a443", "title": "IQMA Network: Image Quality Multi-scale Assessment Network", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900a443/1yXsEB71mg0", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MeoElmyyEo", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "acronym": "sitis", "groupId": "10089803", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1MeoL090t9e", "doi": "10.1109/SITIS57111.2022.00097", "title": "3D point cloud quality assessment method using Mahalanobis distance", "normalizedTitle": "3D point cloud quality assessment method using Mahalanobis distance", "abstract": "In this work, we suggest a Reduced-reference (RR) method assess the visual quality of a deformed point cloud with a reference point cloud of ideal quality. To do so, we extract geometrical and perceptual attributes of both reference and distorted PC. Estimates of their statistical properties (Entropy, Mean, Standard Deviation, Median, Kurtosis, and Skewness) form a features vector for each PC. The perceptual metric between two point clouds is computed using the Mahalanobis distance between their feature vectors. Finally, the random forest regressor is employed to estimate the quality score prediction. To validate our method, a set of experiments are conducted on an open subjective colored point cloud dataset (SJTU-PCQA). The results show that the suggested quality assessment method surpasses some contending methods in regards to correlation with average opinion scores.", "abstracts": [ { "abstractType": "Regular", "content": "In this work, we suggest a Reduced-reference (RR) method assess the visual quality of a deformed point cloud with a reference point cloud of ideal quality. To do so, we extract geometrical and perceptual attributes of both reference and distorted PC. Estimates of their statistical properties (Entropy, Mean, Standard Deviation, Median, Kurtosis, and Skewness) form a features vector for each PC. The perceptual metric between two point clouds is computed using the Mahalanobis distance between their feature vectors. 
Finally, the random forest regressor is employed to estimate the quality score prediction. To validate our method, a set of experiments are conducted on an open subjective colored point cloud dataset (SJTU-PCQA). The results show that the suggested quality assessment method surpasses some contending methods in regards to correlation with average opinion scores.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this work, we suggest a Reduced-reference (RR) method assess the visual quality of a deformed point cloud with a reference point cloud of ideal quality. To do so, we extract geometrical and perceptual attributes of both reference and distorted PC. Estimates of their statistical properties (Entropy, Mean, Standard Deviation, Median, Kurtosis, and Skewness) form a features vector for each PC. The perceptual metric between two point clouds is computed using the Mahalanobis distance between their feature vectors. Finally, the random forest regressor is employed to estimate the quality score prediction. To validate our method, a set of experiments are conducted on an open subjective colored point cloud dataset (SJTU-PCQA). 
The results show that the suggested quality assessment method surpasses some contending methods in regards to correlation with average opinion scores.", "fno": "649500a616", "keywords": [ "Entropy", "Feature Extraction", "Image Colour Analysis", "Random Forests", "Regression Analysis", "Contending Methods", "Deformed Point Cloud", "Feature Vectors", "Features Vector", "Geometrical Attributes", "Ideal Quality", "Mahalanobis Distance", "Open Subjective Colored Point Cloud Dataset", "PC", "Perceptual Attributes", "Perceptual Metric Between Two Point Clouds", "Quality Score Prediction", "Random Forest Regressor", "Reduced Reference Method", "Reference Point Cloud", "Standard Deviation", "Statistical Properties", "Suggested Quality Assessment Method", "Visual Quality", "Point Cloud Compression", "Measurement", "Geometry", "Visualization", "Solid Modeling", "Three Dimensional Displays", "Feature Extraction", "Mahalanobis Distance", "Point Clouds", "Objective Quality Metrics", "Reduced Reference" ], "authors": [ { "affiliation": "Mohammed V University in Rabat,Morocco", "fullName": "Abdelouahed Laazoufi", "givenName": "Abdelouahed", "surname": "Laazoufi", "__typename": "ArticleAuthorType" }, { "affiliation": "Mohammed V University in Rabat,FLSH,Morocco", "fullName": "Mohammed El Hassouni", "givenName": "Mohammed El", "surname": "Hassouni", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Burgundy,France", "fullName": "Hocine Cherifi", "givenName": "Hocine", "surname": "Cherifi", "__typename": "ArticleAuthorType" } ], "idPrefix": "sitis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "616-621", "year": "2022", "issn": null, "isbn": "978-1-6654-6495-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "649500a608", "articleId": "1MeoKfYlQmA", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "649500a622", "articleId": "1MeoMMkuwBW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200d112", "title": "OMNet: Learning Overlapping Mask for Partial-to-Partial Point Cloud Registration", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200d112/1BmH817i3jq", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f479", "title": "SnowflakeNet: Point Cloud Completion by Snowflake Point Deconvolution with Skip-Transformer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f479/1BmL45zCYda", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09756929", "title": "Perceptual Quality Assessment of Colored 3D Point Clouds", "doi": null, "abstractUrl": "/journal/tg/5555/01/09756929/1Cxva6pb2iA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600v1147", "title": "No-Reference Point Cloud Quality Assessment via Domain Adaptation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600v1147/1H0LihR7AqI", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance 
Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b298", "title": ": Joint Point Interaction-Dimension Search for 3D Point Cloud", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b298/1KxVBd77E8o", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09106005", "title": "Towards a Point Cloud Structural Similarity Metric", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106005/1kwqDOtnjFK", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09106052", "title": "Coarse to Fine Rate Control For Region-Based 3D Point Cloud Compression", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106052/1kwqNvqK4qk", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2021/4989/0/09455967", "title": "Deep Learning-Based Quality Assessment Of 3d Point Clouds Without Reference", "doi": null, "abstractUrl": "/proceedings-article/icmew/2021/09455967/1uCgpRSQhCE", "parentPublication": { "id": "proceedings/icmew/2021/4989/0", "title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpbd&is/2021/1327/0/09658452", "title": "2D-3DMatchingNet: Multimodal Point Completion with 2D Geometry Matching", "doi": null, "abstractUrl": "/proceedings-article/hpbd&is/2021/09658452/1zRFmc9ALyo", "parentPublication": { "id": "proceedings/hpbd&is/2021/1327/0", "title": "2021 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1wzs0vrjyWQ", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yVzNTyE0WQ", "doi": "10.1109/CVPRW53098.2021.00054", "title": "Perceptual Image Quality Assessment with Transformers", "normalizedTitle": "Perceptual Image Quality Assessment with Transformers", "abstract": "In this paper, we propose an image quality transformer (IQT) that successfully applies a transformer architecture to a perceptual full-reference image quality assessment (IQA) task. Perceptual representation becomes more important in image quality assessment. In this context, we extract the perceptual feature representations from each of input images using a convolutional neural network (CNN) back-bone. The extracted feature maps are fed into the transformer encoder and decoder in order to compare a reference and distorted images. Following an approach of the transformer-based vision models [18], [55], we use extra learnable quality embedding and position embedding. The output of the transformer is passed to a prediction head in order to predict a final quality score. The experimental results show that our proposed model has an outstanding performance for the standard IQA datasets. For a large-scale IQA dataset containing output images of generative model, our model also shows the promising results. The proposed IQT was ranked first among 13 participants in the NTIRE 2021 perceptual image quality assessment challenge [23]. Our work will be an opportunity to further expand the approach for the perceptual IQA task.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose an image quality transformer (IQT) that successfully applies a transformer architecture to a perceptual full-reference image quality assessment (IQA) task. 
Perceptual representation becomes more important in image quality assessment. In this context, we extract the perceptual feature representations from each of input images using a convolutional neural network (CNN) back-bone. The extracted feature maps are fed into the transformer encoder and decoder in order to compare a reference and distorted images. Following an approach of the transformer-based vision models [18], [55], we use extra learnable quality embedding and position embedding. The output of the transformer is passed to a prediction head in order to predict a final quality score. The experimental results show that our proposed model has an outstanding performance for the standard IQA datasets. For a large-scale IQA dataset containing output images of generative model, our model also shows the promising results. The proposed IQT was ranked first among 13 participants in the NTIRE 2021 perceptual image quality assessment challenge [23]. Our work will be an opportunity to further expand the approach for the perceptual IQA task.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose an image quality transformer (IQT) that successfully applies a transformer architecture to a perceptual full-reference image quality assessment (IQA) task. Perceptual representation becomes more important in image quality assessment. In this context, we extract the perceptual feature representations from each of input images using a convolutional neural network (CNN) back-bone. The extracted feature maps are fed into the transformer encoder and decoder in order to compare a reference and distorted images. Following an approach of the transformer-based vision models [18], [55], we use extra learnable quality embedding and position embedding. The output of the transformer is passed to a prediction head in order to predict a final quality score. 
The experimental results show that our proposed model has an outstanding performance for the standard IQA datasets. For a large-scale IQA dataset containing output images of generative model, our model also shows the promising results. The proposed IQT was ranked first among 13 participants in the NTIRE 2021 perceptual image quality assessment challenge [23]. Our work will be an opportunity to further expand the approach for the perceptual IQA task.", "fno": "489900a433", "keywords": [ "Approximation Theory", "Computer Vision", "Distortion", "Feature Extraction", "Image Representation", "Neural Nets", "Object Detection", "Regression Analysis", "Perceptual Image Quality Assessment Challenge", "Perceptual IQA Task", "Image Quality Transformer", "IQT", "Transformer Architecture", "Perceptual Full Reference Image Quality Assessment Task", "Perceptual Feature Representations", "Input Images", "Convolutional Neural Network Back Bone", "Extracted Feature Maps", "Transformer Encoder", "Decoder", "Distorted Images", "Transformer Based Vision Models", "Extra Learnable Quality", "Position Embedding", "Final Quality Score", "Standard IQA Datasets", "Large Scale IQA Dataset", "CNN Back Bone", "Image Quality", "Measurement", "Image Resolution", "Head", "Feature Extraction", "Quality Assessment", "Pattern Recognition" ], "authors": [ { "affiliation": "LG Electronics,Seoul,Korea", "fullName": "Manri Cheon", "givenName": "Manri", "surname": "Cheon", "__typename": "ArticleAuthorType" }, { "affiliation": "LG Electronics,Seoul,Korea", "fullName": "Sung-Jun Yoon", "givenName": "Sung-Jun", "surname": "Yoon", "__typename": "ArticleAuthorType" }, { "affiliation": "LG Electronics,Seoul,Korea", "fullName": "Byungyeon Kang", "givenName": "Byungyeon", "surname": "Kang", "__typename": "ArticleAuthorType" }, { "affiliation": "LG Electronics,Seoul,Korea", "fullName": "Junwoo Lee", "givenName": "Junwoo", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": 
false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "433-442", "year": "2021", "issn": null, "isbn": "978-1-6654-4899-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "489900a424", "articleId": "1yVA1Sl4tZm", "__typename": "AdjacentArticleType" }, "next": { "fno": "489900a443", "articleId": "1yXsEB71mg0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dcc/2012/4656/0/4656a406", "title": "P^2SNR: Perceptual Full-Reference Image Quality Assessment for JPEG2000", "doi": null, "abstractUrl": "/proceedings-article/dcc/2012/4656a406/12OmNvDI3NA", "parentPublication": { "id": "proceedings/dcc/2012/4656/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2015/7082/0/07177436", "title": "Structure-preserving Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icme/2015/07177436/12OmNwJPN1F", "parentPublication": { "id": "proceedings/icme/2015/7082/0", "title": "2015 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500d989", "title": "No-Reference Image Quality Assessment via Transformers, Relative Ranking, and Self-Consistency", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500d989/1B1417PUbkI", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a939", "title": "Conformer and Blind Noisy Students for Improved Image Quality Assessment", "doi": 
null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a939/1G562YGgmze", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b190", "title": "MANIQA: Multi-dimension Attention Network for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b190/1G56mBh1R5e", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a950", "title": "NTIRE 2022 Challenge on Perceptual Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a950/1G56zKFMbjG", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b804", "title": "Image Quality Assessment with Transformers and Multi-Metric Fusion Modules", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b804/1G57ferfqww", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a861", "title": "Multi-Level Feature Aggregation Network for Full-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a861/1MrFYFNlVq8", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", 
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b953", "title": "Saliency-Guided Transformer Network combined with Local Embedding for No-Reference Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b953/1yNimG9PBhm", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a677", "title": "NTIRE 2021 Challenge on Perceptual Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900a677/1yZ3QigxUoo", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzDvSnQ", "title": "Volume Visualization and Graphics, IEEE Symposium on", "acronym": "vv", "groupId": "1000808", "volume": "0", "displayVolume": "0", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNBRsVxg", "doi": "10.1109/SVV.1998.729585", "title": "Adaptive Perspective Ray Casting", "normalizedTitle": "Adaptive Perspective Ray Casting", "abstract": "we present a method to accurately and efficiently perform perspective volumetric ray casting of uniform regular datasets, called Exponential-Region (ER) Perspective. Unlike previous methods which undersample, oversample, or approximate the data, our method near uniformly samples the data throughout the viewing volume. In addition, it gains algorithmic advantages from a regular sampling pattern and cache-coherent read access, making it an algorithm well suited for implementation on hardware architectures for volume rendering. We qualify the algorithm by its filtering characteristics and demonstrate its effectiveness by contrasting its antialiasing quality and timing with other perspective ray casting methods.", "abstracts": [ { "abstractType": "Regular", "content": "we present a method to accurately and efficiently perform perspective volumetric ray casting of uniform regular datasets, called Exponential-Region (ER) Perspective. Unlike previous methods which undersample, oversample, or approximate the data, our method near uniformly samples the data throughout the viewing volume. In addition, it gains algorithmic advantages from a regular sampling pattern and cache-coherent read access, making it an algorithm well suited for implementation on hardware architectures for volume rendering. 
We qualify the algorithm by its filtering characteristics and demonstrate its effectiveness by contrasting its antialiasing quality and timing with other perspective ray casting methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "we present a method to accurately and efficiently perform perspective volumetric ray casting of uniform regular datasets, called Exponential-Region (ER) Perspective. Unlike previous methods which undersample, oversample, or approximate the data, our method near uniformly samples the data throughout the viewing volume. In addition, it gains algorithmic advantages from a regular sampling pattern and cache-coherent read access, making it an algorithm well suited for implementation on hardware architectures for volume rendering. We qualify the algorithm by its filtering characteristics and demonstrate its effectiveness by contrasting its antialiasing quality and timing with other perspective ray casting methods.", "fno": "91800055", "keywords": [ "Adaptive Supersampling", "Perspective Ray Casting", "Volume Rendering", "Volume Rendering Hardware" ], "authors": [ { "affiliation": null, "fullName": "Ingmar Bitter", "givenName": "Ingmar", "surname": "Bitter", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Arie Kaufman", "givenName": "Arie", "surname": "Kaufman", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Baoquan Chen", "givenName": "Baoquan", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Frank Dachille", "givenName": "Frank", "surname": "Dachille", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kevin Kreeger", "givenName": "Kevin", "surname": "Kreeger", "__typename": "ArticleAuthorType" } ], "idPrefix": "vv", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-10-01T00:00:00", "pubType": "proceedings", "pages": "55-62", "year": "1998", "issn": 
null, "isbn": "1-58113-105-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "91800047", "articleId": "12OmNyLiuwI", "__typename": "AdjacentArticleType" }, "next": { "fno": "91800063", "articleId": "12OmNzJbQVc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNylsZKi", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNyoAA7g", "doi": "10.1109/VISUAL.1998.745310", "title": "Accelerated Ray-Casting for Curvilinear Volumes", "normalizedTitle": "Accelerated Ray-Casting for Curvilinear Volumes", "abstract": "We present an ef.cient and robust ray-casting algorithm for directly rendering a curvilinear volume of arbitrarily-shaped cells. We designed the algorithm to alleviate the consumption of CPU power and memory space. By incorporating the essence of the projection paradigm into the ray-casting process, we have successfully accelerated the ray traversal through the grid and data interpolations at sample points. Our algorithm also overcomes the conventional limitation requiring the cells to be convex. Application of this algorithm to several commonly-used curvilinear data sets has produced a favorable performance when compared with recently reported algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "We present an ef.cient and robust ray-casting algorithm for directly rendering a curvilinear volume of arbitrarily-shaped cells. We designed the algorithm to alleviate the consumption of CPU power and memory space. By incorporating the essence of the projection paradigm into the ray-casting process, we have successfully accelerated the ray traversal through the grid and data interpolations at sample points. Our algorithm also overcomes the conventional limitation requiring the cells to be convex. 
Application of this algorithm to several commonly-used curvilinear data sets has produced a favorable performance when compared with recently reported algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an ef.cient and robust ray-casting algorithm for directly rendering a curvilinear volume of arbitrarily-shaped cells. We designed the algorithm to alleviate the consumption of CPU power and memory space. By incorporating the essence of the projection paradigm into the ray-casting process, we have successfully accelerated the ray traversal through the grid and data interpolations at sample points. Our algorithm also overcomes the conventional limitation requiring the cells to be convex. Application of this algorithm to several commonly-used curvilinear data sets has produced a favorable performance when compared with recently reported algorithms.", "fno": "91760247", "keywords": [ "Volume Visualization", "Volume Rendering", "Irregular Grid", "Curvilinear Grid", "Ray Casting", "Parallel Rendering", "Dynamic Simulation" ], "authors": [ { "affiliation": "Bell Laboratories, Lucent Technologies", "fullName": "Lichan Hong", "givenName": "Lichan", "surname": "Hong", "__typename": "ArticleAuthorType" }, { "affiliation": "State University of New York at Stony Brook", "fullName": "Arie Kaufman", "givenName": "Arie", "surname": "Kaufman", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-10-01T00:00:00", "pubType": "proceedings", "pages": "247", "year": "1998", "issn": null, "isbn": "0-8186-9176-x", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00745351", "articleId": "12OmNBRsVxU", "__typename": "AdjacentArticleType" }, "next": { "fno": "91760255", "articleId": "12OmNzTYC1D", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dagstuhl/1997/0503/0/05030124", "title": "Notes on Computational-Space-Based Ray-Casting for Curvilinear Volumes", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/05030124/12OmNAXglN4", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2008/1966/0/04475452", "title": "Efficient Rendering of Extrudable Curvilinear Volumes", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2008/04475452/12OmNBKW9AV", "parentPublication": { "id": "proceedings/pacificvis/2008/1966/0", "title": "IEEE Pacific Visualization Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/1998/9180/0/91800055", "title": "Adaptive Perspective Ray Casting", "doi": null, "abstractUrl": "/proceedings-article/vv/1998/91800055/12OmNBRsVxg", "parentPublication": { "id": "proceedings/vv/1998/9180/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200westermann", "title": "Accelerated Volume Ray-Casting using Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200westermann/12OmNCbU30D", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970063", "title": "High Performance Presence-Accelerated Ray Casting", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970063/12OmNqJ8tac", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1995/7187/0/71870061", "title": "Splatting of curvilinear volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870061/12OmNwCsdPK", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uksim/2008/3114/0/3114a372", "title": "A Particle Modeling for Rendering Irregular Volumes", "doi": null, "abstractUrl": "/proceedings-article/uksim/2008/3114a372/12OmNyRg4uB", "parentPublication": { "id": "proceedings/uksim/2008/3114/0", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1997/02/v0142", "title": "The Lazy Sweep Ray Casting Algorithm for Rendering Irregular Grids", "doi": null, "abstractUrl": "/journal/tg/1997/02/v0142/13rRUxASu0A", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1999/04/v0322", "title": "Fast Projection-Based Ray-Casting Algorithm for Rendering Curvilinear Volumes", "doi": null, "abstractUrl": "/journal/tg/1999/04/v0322/13rRUyY294r", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dagstuhl/1997/0503/0/01423109", "title": "Notes on Computational-Space-Based Ray-Casting for Curvilinear Volumes", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/01423109/1h0N2FRU8Tu", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCf1Dp1", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNyyeWwh", "doi": "10.1109/VISUAL.2002.1183776", "title": "A New Object-Order Ray-Casting Algorithm", "normalizedTitle": "A New Object-Order Ray-Casting Algorithm", "abstract": "Many direct volume rendering algorithms have been proposed during the last decade to render 256 3 voxels interactively. However a lot of limitations are inherent to all of them, like low- quality images, a small viewport size or a fixed classification. In contrast, interactive high quality algorithms are still a challenge nowadays. We introduce here an efficient and accurate technique called object-order ray-casting that can achieve up to 10 fps on current workstations. Like usual ray-casting, colors and opacities are evenly sampled along the ray, but now within a new object- order algorithm. Thus, it allows to combine the main advantages of both worlds in term of speed and quality. We also describe an efficient hidden volume removal technique to compensate for the loss of early ray termination.", "abstracts": [ { "abstractType": "Regular", "content": "Many direct volume rendering algorithms have been proposed during the last decade to render 256 3 voxels interactively. However a lot of limitations are inherent to all of them, like low- quality images, a small viewport size or a fixed classification. In contrast, interactive high quality algorithms are still a challenge nowadays. We introduce here an efficient and accurate technique called object-order ray-casting that can achieve up to 10 fps on current workstations. Like usual ray-casting, colors and opacities are evenly sampled along the ray, but now within a new object- order algorithm. Thus, it allows to combine the main advantages of both worlds in term of speed and quality. 
We also describe an efficient hidden volume removal technique to compensate for the loss of early ray termination.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many direct volume rendering algorithms have been proposed during the last decade to render 256 3 voxels interactively. However a lot of limitations are inherent to all of them, like low- quality images, a small viewport size or a fixed classification. In contrast, interactive high quality algorithms are still a challenge nowadays. We introduce here an efficient and accurate technique called object-order ray-casting that can achieve up to 10 fps on current workstations. Like usual ray-casting, colors and opacities are evenly sampled along the ray, but now within a new object- order algorithm. Thus, it allows to combine the main advantages of both worlds in term of speed and quality. We also describe an efficient hidden volume removal technique to compensate for the loss of early ray termination.", "fno": "7498mora", "keywords": [ "Volume Rendering", "Scientific Visualization", "Medical Imaging", "Ray Tracing" ], "authors": [ { "affiliation": "IRIT", "fullName": "Benjamin Mora", "givenName": "Benjamin", "surname": "Mora", "__typename": "ArticleAuthorType" }, { "affiliation": "IRIT", "fullName": "Jean-Pierre Jessel", "givenName": "Jean-Pierre", "surname": "Jessel", "__typename": "ArticleAuthorType" }, { "affiliation": "IRIT", "fullName": "René Caubet", "givenName": "René", "surname": "Caubet", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-10-01T00:00:00", "pubType": "proceedings", "pages": "null", "year": "2002", "issn": "1070-2385", "isbn": "0-7803-7498-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7498wan", "articleId": "12OmNqG0SMC", "__typename": "AdjacentArticleType" }, "next": { 
"fno": "7498lu", "articleId": "12OmNy9Prft", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vv/1998/9180/0/91800055", "title": "Adaptive Perspective Ray Casting", "doi": null, "abstractUrl": "/proceedings-article/vv/1998/91800055/12OmNBRsVxg", "parentPublication": { "id": "proceedings/vv/1998/9180/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipps/1995/7074/0/70740707", "title": "An optimal parallel algorithm for volume ray casting", "doi": null, "abstractUrl": "/proceedings-article/ipps/1995/70740707/12OmNxd4tyh", "parentPublication": { "id": "proceedings/ipps/1995/7074/0", "title": "Proceedings of 9th International Parallel Processing Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/frontiers/1995/6965/0/69650238", "title": "An optimal parallel algorithm for volume ray casting", "doi": null, "abstractUrl": "/proceedings-article/frontiers/1995/69650238/12OmNxisQY8", "parentPublication": { "id": "proceedings/frontiers/1995/6965/0", "title": "Frontiers of Massively Parallel Processing, Symposium on the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a084", "title": "CUDA-Based Volume Ray-Casting Using Cubic B-spline", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a084/12OmNyaXPSs", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1998/9176/0/91760247", "title": "Accelerated Ray-Casting for Curvilinear Volumes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760247/12OmNyoAA7g", "parentPublication": 
{ "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300044", "title": "Hardware-Based Ray Casting for Tetrahedral Meshes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300044/12OmNzXnNw2", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061525", "title": "Fast High-Quality Volume Ray Casting with Virtual Samplings", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061525/13rRUxAATgs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1997/02/v0142", "title": "The Lazy Sweep Ray Casting Algorithm for Rendering Irregular Grids", "doi": null, "abstractUrl": "/journal/tg/1997/02/v0142/13rRUxASu0A", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/05/mcg2008050066", "title": "Dynamic Shader Generation for GPU-Based Multi-Volume Ray Casting", "doi": null, "abstractUrl": "/magazine/cg/2008/05/mcg2008050066/13rRUxN5evD", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1999/04/v0322", "title": "Fast Projection-Based Ray-Casting Algorithm for Rendering Curvilinear Volumes", "doi": null, "abstractUrl": "/journal/tg/1999/04/v0322/13rRUyY294r", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbCrVD", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNqI04HL", "doi": "10.1109/VISUAL.1995.485146", "title": "Unsteady Flow Volumes", "normalizedTitle": "Unsteady Flow Volumes", "abstract": "Flow volumes [1] are extended for use in unsteady (time-dependent) flows. The resulting unsteady flow volumes are the 3 dimensional analog of streaklines. There are few examples where methods other than particle tracing have been used to visualize time varying flows. Since particle paths can become convoluted in time there are additional considerations to be made when extending any visualization technique to unsteady flows. We will present some solutions to the problems which occur in subdivision, rendering, and system design. We will apply the unsteady flow volumes to a variety of field types including moving multi-zoned curvilinear grids.", "abstracts": [ { "abstractType": "Regular", "content": "Flow volumes [1] are extended for use in unsteady (time-dependent) flows. The resulting unsteady flow volumes are the 3 dimensional analog of streaklines. There are few examples where methods other than particle tracing have been used to visualize time varying flows. Since particle paths can become convoluted in time there are additional considerations to be made when extending any visualization technique to unsteady flows. We will present some solutions to the problems which occur in subdivision, rendering, and system design. We will apply the unsteady flow volumes to a variety of field types including moving multi-zoned curvilinear grids.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Flow volumes [1] are extended for use in unsteady (time-dependent) flows. The resulting unsteady flow volumes are the 3 dimensional analog of streaklines. 
There are few examples where methods other than particle tracing have been used to visualize time varying flows. Since particle paths can become convoluted in time there are additional considerations to be made when extending any visualization technique to unsteady flows. We will present some solutions to the problems which occur in subdivision, rendering, and system design. We will apply the unsteady flow volumes to a variety of field types including moving multi-zoned curvilinear grids.", "fno": "71870329", "keywords": [ "Unsteady", "Flow", "Vector Field", "Visualization", "Streakline" ], "authors": [ { "affiliation": "Lawrence Livermore National Laboratory", "fullName": "Barry G. Becker", "givenName": "Barry G.", "surname": "Becker", "__typename": "ArticleAuthorType" }, { "affiliation": "Lawrence Livermore National Laboratory", "fullName": "Nelson L. Max", "givenName": "Nelson L.", "surname": "Max", "__typename": "ArticleAuthorType" }, { "affiliation": "NASA Ames Research Center", "fullName": "David A. Lane", "givenName": "David A.", "surname": "Lane", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-10-01T00:00:00", "pubType": "proceedings", "pages": "329", "year": "1995", "issn": "1070-2385", "isbn": "0-8186-7187-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "71870321", "articleId": "12OmNzdoN5y", "__typename": "AdjacentArticleType" }, "next": { "fno": "71870338", "articleId": "12OmNwtWfE8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqI04I8", "title": "Intelligent Systems Design and Applications, International Conference on", "acronym": "isda", "groupId": "1001454", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNzmLxKy", "doi": "10.1109/ISDA.2009.147", "title": "A New Scheme for Vision Based Flying Vehicle Detection Using Motion Flow Vectors Classification", "normalizedTitle": "A New Scheme for Vision Based Flying Vehicle Detection Using Motion Flow Vectors Classification", "abstract": "This paper presents a vision based scheme for detecting flying vehicle using a new feature extraction and correspondence algorithm as well as a motion flow vectors classifier. The base of detection is to classify the motion flow vectors of object and scene at two video sequences from a mobile monocular CCD camera. For this purpose, we introduce a method to extract robust features from fuzzified edges at first frame. Then, correspondence features are approximated at second video frame by a multi resolution feature matching processing based on edge Gaussian pyramids. In next stage, the estimated motion flow vectors classify into two object and scene classes using a supervised machine learning method based on MLPs neural network. In final step, the flying vehicle localize by approximating the contour of object based on a convex hull algorithm. Experimental results demonstrate that the proposed method has proper stability and reliability especially for the detection of aerial vehicle in applications with mobile camera.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a vision based scheme for detecting flying vehicle using a new feature extraction and correspondence algorithm as well as a motion flow vectors classifier. The base of detection is to classify the motion flow vectors of object and scene at two video sequences from a mobile monocular CCD camera. 
For this purpose, we introduce a method to extract robust features from fuzzified edges at first frame. Then, correspondence features are approximated at second video frame by a multi resolution feature matching processing based on edge Gaussian pyramids. In next stage, the estimated motion flow vectors classify into two object and scene classes using a supervised machine learning method based on MLPs neural network. In final step, the flying vehicle localize by approximating the contour of object based on a convex hull algorithm. Experimental results demonstrate that the proposed method has proper stability and reliability especially for the detection of aerial vehicle in applications with mobile camera.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a vision based scheme for detecting flying vehicle using a new feature extraction and correspondence algorithm as well as a motion flow vectors classifier. The base of detection is to classify the motion flow vectors of object and scene at two video sequences from a mobile monocular CCD camera. For this purpose, we introduce a method to extract robust features from fuzzified edges at first frame. Then, correspondence features are approximated at second video frame by a multi resolution feature matching processing based on edge Gaussian pyramids. In next stage, the estimated motion flow vectors classify into two object and scene classes using a supervised machine learning method based on MLPs neural network. In final step, the flying vehicle localize by approximating the contour of object based on a convex hull algorithm. 
Experimental results demonstrate that the proposed method has proper stability and reliability especially for the detection of aerial vehicle in applications with mobile camera.", "fno": "3872a175", "keywords": [ "Feature Extraction And Correspondence", "Flying Vehicle Detection", "Fuzzy Sets Theory", "ML Ps Neural Network", "Optical Flow" ], "authors": [ { "affiliation": null, "fullName": "Ali Taimori", "givenName": "Ali", "surname": "Taimori", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Alireza Behrad", "givenName": "Alireza", "surname": "Behrad", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Samira Sabouri", "givenName": "Samira", "surname": "Sabouri", "__typename": "ArticleAuthorType" } ], "idPrefix": "isda", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-11-01T00:00:00", "pubType": "proceedings", "pages": "175-180", "year": "2009", "issn": null, "isbn": "978-0-7695-3872-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3872a169", "articleId": "12OmNAtst46", "__typename": "AdjacentArticleType" }, "next": { "fno": "3872a181", "articleId": "12OmNqJZgFF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ifita/2009/3600/2/3600b696", "title": "Optical Flow Estimation Based on Predictive Vectors", "doi": null, "abstractUrl": "/proceedings-article/ifita/2009/3600b696/12OmNAnuTvO", "parentPublication": { "id": "proceedings/ifita/2009/3600/2", "title": "2009 International Forum on Information Technology and Applications (IFITA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nbis/2012/4779/0/4779a603", "title": "On Automatic Flying Distance Measurement on Ski Jumper's Motion Monitoring System", "doi": null, "abstractUrl": 
"/proceedings-article/nbis/2012/4779a603/12OmNBhHtc7", "parentPublication": { "id": "proceedings/nbis/2012/4779/0", "title": "2012 15th International Conference on Network-Based Information Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/1/73100574", "title": "Motion compensation of motion vectors", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73100574/12OmNCmGO0V", "parentPublication": { "id": "proceedings/icip/1995/7310/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/06977422", "title": "Go with the Flow: Improving Multi-view Vehicle Detection with Motion Cues", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/06977422/12OmNwt5soc", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2008/3494/1/3494a154", "title": "The Research on Vehicle Flow Detection in Complex Scenes", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494a154/12OmNxETahV", "parentPublication": { "id": "proceedings/isise/2008/3494/1", "title": "2008 International Symposium on Information Science and Engieering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2013/4953/0/4953a838", "title": "Early Evaluation of Automatic Flying Distance Measurement on Ski Jumper's Motion Monitoring System", "doi": null, "abstractUrl": "/proceedings-article/aina/2013/4953a838/12OmNxGj9RX", "parentPublication": { "id": "proceedings/aina/2013/4953/0", "title": "2013 IEEE 27th International Conference on Advanced Information Networking and Applications (AINA)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cis/2013/2549/0/06746394", "title": "A Novel Vehicle Flow Detection Algorithm Based on Motion Saliency for Traffic Surveillance System", "doi": null, "abstractUrl": "/proceedings-article/cis/2013/06746394/12OmNyPQ4xu", "parentPublication": { "id": "proceedings/cis/2013/2549/0", "title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761183", "title": "Learning motion patterns in crowded scenes using motion flow field", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761183/12OmNyRPgMY", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/1/01394243", "title": "A statistical approach for object motion estimation with MPEG motion vectors", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394243/12OmNymjN1W", "parentPublication": { "id": "proceedings/icme/2004/8603/1", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/08/07983006", "title": "FlyCap: Markerless Motion Capture Using Multiple Autonomous Flying Cameras", "doi": null, "abstractUrl": "/journal/tg/2018/08/07983006/13rRUxYrbUO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqGA5i8", "title": "Computer Graphics and Applications, Pacific Conference on", "acronym": "pg", "groupId": "1000130", "volume": "0", "displayVolume": "0", "year": "1999", "__typename": "ProceedingType" }, "article": { "id": "12OmNAoDi54", "doi": "10.1109/PCCGA.1999.803349", "title": "Output Sensitive Extraction of Silhouettes from Polygonal Geometry", "normalizedTitle": "Output Sensitive Extraction of Silhouettes from Polygonal Geometry", "abstract": "An algorithm to allow real time interactive extraction and orthographic display of silhouettes of complex two-manifold polygonal object(s) is presented. An off-line preprocessing of all the edges of all polygons enables the efficient extraction of the silhouette edges in real time, once a viewing direction is prescribed. During the interactive session, the time complexity of extracting the silhouette edges is linear in the number of edges in the silhouette, and is typically in the order of O(pn),where n is the number of polygons in the scene. The time complexity of the preprocessing stage is linear in n.", "abstracts": [ { "abstractType": "Regular", "content": "An algorithm to allow real time interactive extraction and orthographic display of silhouettes of complex two-manifold polygonal object(s) is presented. An off-line preprocessing of all the edges of all polygons enables the efficient extraction of the silhouette edges in real time, once a viewing direction is prescribed. During the interactive session, the time complexity of extracting the silhouette edges is linear in the number of edges in the silhouette, and is typically in the order of O(pn),where n is the number of polygons in the scene. The time complexity of the preprocessing stage is linear in n.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An algorithm to allow real time interactive extraction and orthographic display of silhouettes of complex two-manifold polygonal object(s) is presented. 
An off-line preprocessing of all the edges of all polygons enables the efficient extraction of the silhouette edges in real time, once a viewing direction is prescribed. During the interactive session, the time complexity of extracting the silhouette edges is linear in the number of edges in the silhouette, and is typically in the order of O(pn),where n is the number of polygons in the scene. The time complexity of the preprocessing stage is linear in n.", "fno": "02930060", "keywords": [ "Silhouettes", "Real Time Display", "Gaussian Sphere", "Visibility Determination", "Range Searching" ], "authors": [ { "affiliation": "Technion, Israel Institute of Technology,", "fullName": "F. Benichou", "givenName": "F.", "surname": "Benichou", "__typename": "ArticleAuthorType" }, { "affiliation": "Technion, Israel Institute of Technology,", "fullName": "G. Elber", "givenName": "G.", "surname": "Elber", "__typename": "ArticleAuthorType" } ], "idPrefix": "pg", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1999-10-01T00:00:00", "pubType": "proceedings", "pages": "60", "year": "1999", "issn": null, "isbn": "0-7695-0293-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "02930050", "articleId": "12OmNylboAU", "__typename": "AdjacentArticleType" }, "next": { "fno": "02930070", "articleId": "12OmNBSSV8I", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqH9hnp", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNBQTJin", "doi": "10.1109/CVPR.2016.437", "title": "Joint Multiview Segmentation and Localization of RGB-D Images Using Depth-Induced Silhouette Consistency", "normalizedTitle": "Joint Multiview Segmentation and Localization of RGB-D Images Using Depth-Induced Silhouette Consistency", "abstract": "In this paper, we propose an RGB-D camera localization approach which takes an effective geometry constraint, i.e. silhouette consistency, into consideration. Unlike existing approaches which usually assume the silhouettes are provided, we consider more practical scenarios and generate the silhouettes for multiple views on the fly. To obtain a set of accurate silhouettes, precise camera poses are required to propagate segmentation cues across views. To perform better localization, accurate silhouettes are needed to constrain camera poses. Therefore the two problems are intertwined with each other and require a joint treatment. Facilitated by the available depth, we introduce a simple but effective silhouette consistency energy term that binds traditional appearance-based multiview segmentation cost and RGB-D frame-to-frame matching cost together. Optimization of the problem w.r.t. binary segmentation masks and camera poses naturally fits in the graph cut minimization framework and the Gauss-Newton non-linear least-squares method respectively. Experiments show that the proposed approach achieves state-of-the-arts performance on both tasks of image segmentation and camera localization.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose an RGB-D camera localization approach which takes an effective geometry constraint, i.e. silhouette consistency, into consideration. 
Unlike existing approaches which usually assume the silhouettes are provided, we consider more practical scenarios and generate the silhouettes for multiple views on the fly. To obtain a set of accurate silhouettes, precise camera poses are required to propagate segmentation cues across views. To perform better localization, accurate silhouettes are needed to constrain camera poses. Therefore the two problems are intertwined with each other and require a joint treatment. Facilitated by the available depth, we introduce a simple but effective silhouette consistency energy term that binds traditional appearance-based multiview segmentation cost and RGB-D frame-to-frame matching cost together. Optimization of the problem w.r.t. binary segmentation masks and camera poses naturally fits in the graph cut minimization framework and the Gauss-Newton non-linear least-squares method respectively. Experiments show that the proposed approach achieves state-of-the-arts performance on both tasks of image segmentation and camera localization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose an RGB-D camera localization approach which takes an effective geometry constraint, i.e. silhouette consistency, into consideration. Unlike existing approaches which usually assume the silhouettes are provided, we consider more practical scenarios and generate the silhouettes for multiple views on the fly. To obtain a set of accurate silhouettes, precise camera poses are required to propagate segmentation cues across views. To perform better localization, accurate silhouettes are needed to constrain camera poses. Therefore the two problems are intertwined with each other and require a joint treatment. Facilitated by the available depth, we introduce a simple but effective silhouette consistency energy term that binds traditional appearance-based multiview segmentation cost and RGB-D frame-to-frame matching cost together. 
Optimization of the problem w.r.t. binary segmentation masks and camera poses naturally fits in the graph cut minimization framework and the Gauss-Newton non-linear least-squares method respectively. Experiments show that the proposed approach achieves state-of-the-arts performance on both tasks of image segmentation and camera localization.", "fno": "07780806", "keywords": [ "Cameras", "Optimization", "Image Color Analysis", "Image Segmentation", "Three Dimensional Displays", "Labeling", "Streaming Media" ], "authors": [ { "affiliation": null, "fullName": "Chi Zhang", "givenName": "Chi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhiwei Li", "givenName": "Zhiwei", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Rui Cai", "givenName": "Rui", "surname": "Cai", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hongyang Chao", "givenName": "Hongyang", "surname": "Chao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yong Rui", "givenName": "Yong", "surname": "Rui", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-06-01T00:00:00", "pubType": "proceedings", "pages": "4031-4039", "year": "2016", "issn": "1063-6919", "isbn": "978-1-4673-8851-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07780805", "articleId": "12OmNxG1yVZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "07780807", "articleId": "12OmNrJiCRv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851d346", "title": "Consistency of Silhouettes and Their Duals", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851d346/12OmNC1Guhz", "parentPublication": { "id": 
"proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/3/07295106", "title": "Shape from silhouette in space, time and light domains", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07295106/12OmNxcMSbE", "parentPublication": { "id": "proceedings/visapp/2014/8133/2", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssiai/2016/9919/0/07459200", "title": "Indoor assistance for visually impaired people using a RGB-D camera", "doi": null, "abstractUrl": "/proceedings-article/ssiai/2016/07459200/12OmNzVoBLe", "parentPublication": { "id": "proceedings/ssiai/2016/9919/0", "title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270186", "title": "Efficiently Determining Silhouette Consistency", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270186/12OmNzsJ7Jg", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/02/i0343", "title": "Silhouette Coherence for Camera Calibration under Circular Motion", "doi": null, "abstractUrl": "/journal/tp/2007/02/i0343/13rRUwbaqMD", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/06/ttp2011061161", "title": "Multiview Stereo and Silhouette Consistency via Convex Functionals over Convex Domains", 
"doi": null, "abstractUrl": "/journal/tp/2011/06/ttp2011061161/13rRUxAASXn", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/08/07321825", "title": "Simultaneous Localization and Appearance Estimation with a Consumer RGB-D Camera", "doi": null, "abstractUrl": "/journal/tg/2016/08/07321825/13rRUyv53Fv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percomw/2011/938/0/05766970", "title": "Silhouette classification using pixel and voxel features for improved elder monitoring in dynamic environments", "doi": null, "abstractUrl": "/proceedings-article/percomw/2011/05766970/17D45VsBU4s", "parentPublication": { "id": "proceedings/percomw/2011/938/0", "title": "2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2011). 
PerCom-Workshops 2011: 2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acpr/2017/3354/0/3354a049", "title": "Split and Merge for Accurate Plane Segmentation in RGB-D Images", "doi": null, "abstractUrl": "/proceedings-article/acpr/2017/3354a049/17D45Vw15t8", "parentPublication": { "id": "proceedings/acpr/2017/3354/0", "title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f949", "title": "Joint Texture and Geometry Optimization for RGB-D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f949/1m3ogA88vw4", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzSh1bk", "title": "Proceedings. Third International Conference on Image and Graphics", "acronym": "icig", "groupId": "1001790", "volume": "0", "displayVolume": "0", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNrIrPnj", "doi": "10.1109/ICIG.2004.28", "title": "A survey of silhouette detection techniques for non-photorealistic rendering", "normalizedTitle": "A survey of silhouette detection techniques for non-photorealistic rendering", "abstract": "Silhouettes play a critical role in non-photorealistic rendering. The effect of the NPR greatly depends on the silhouette performance. And it is also a key technology for real-time NPR applications. This paper introduces the most popular and latest techniques in silhouette detection. We classify and analyze them, and discuss them with the problem of visibility determination. After analyze the advantage and disadvantage of them, the working context of them is also concluded.", "abstracts": [ { "abstractType": "Regular", "content": "Silhouettes play a critical role in non-photorealistic rendering. The effect of the NPR greatly depends on the silhouette performance. And it is also a key technology for real-time NPR applications. This paper introduces the most popular and latest techniques in silhouette detection. We classify and analyze them, and discuss them with the problem of visibility determination. After analyze the advantage and disadvantage of them, the working context of them is also concluded.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Silhouettes play a critical role in non-photorealistic rendering. The effect of the NPR greatly depends on the silhouette performance. And it is also a key technology for real-time NPR applications. This paper introduces the most popular and latest techniques in silhouette detection. We classify and analyze them, and discuss them with the problem of visibility determination. 
After analyze the advantage and disadvantage of them, the working context of them is also concluded.", "fno": "01410476", "keywords": [ "Image Edge Detection", "Shape", "Space Technology", "Object Detection", "Application Software", "Laboratories", "Artificial Intelligence", "Computer Science", "Visual Effects", "Brushes" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Zhejiang Univ., Hangzhou, China", "fullName": "Wang Ao-yu", "givenName": null, "surname": "Wang Ao-yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Zhejiang Univ., Hangzhou, China", "fullName": "Tang Min", "givenName": null, "surname": "Tang Min", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Zhejiang Univ., Hangzhou, China", "fullName": "Dong Jin-xiang", "givenName": null, "surname": "Dong Jin-xiang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icig", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "434-437", "year": "2004", "issn": null, "isbn": "0-7695-2244-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01410475", "articleId": "12OmNweTvQd", "__typename": "AdjacentArticleType" }, "next": { "fno": "01410477", "articleId": "12OmNzh5z5v", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2012/4660/0/06402552", "title": "A non-photorealistic rendering framework with temporal coherence for augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402552/12OmNs59JGh", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/pg/2003/2028/0/20280424", "title": "OPENNPAR: A System for Developing, Programming, and Designing Non-Photorealistic Animation and Rendering", "doi": null, "abstractUrl": "/proceedings-article/pg/2003/20280424/12OmNwt5sll", "parentPublication": { "id": "proceedings/pg/2003/2028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2003/2028/0/20280472", "title": "INSPIRE: An Interactive Image Assisted Non-Photorealistic Rendering System", "doi": null, "abstractUrl": "/proceedings-article/pg/2003/20280472/12OmNyRPgRb", "parentPublication": { "id": "proceedings/pg/2003/2028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccit/2008/3407/1/3407a925", "title": "Non-photorealistic Directional Line Draw Rendering", "doi": null, "abstractUrl": "/proceedings-article/iccit/2008/3407a925/12OmNyqzM2l", "parentPublication": { "id": "iccit/2008/3407/1", "title": "Convergence Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2004/2178/0/21780215", "title": "Non-Photorealistic Outdoor Scene Rendering: Techniques and Application", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2004/21780215/12OmNzZmZtZ", "parentPublication": { "id": "proceedings/cgiv/2004/2178/0", "title": "Proceedings. International Conference on Computer Graphics, Imaging and Visualization, 2004. 
CGIV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccs/2009/3906/0/3906a017", "title": "Non-photorealistic Rendering for 3D Mesh Models", "doi": null, "abstractUrl": "/proceedings-article/icccs/2009/3906a017/12OmNzgNXYX", "parentPublication": { "id": "proceedings/icccs/2009/3906/0", "title": "2009 International Conference on Computer and Communications Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270186", "title": "Efficiently Determining Silhouette Consistency", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270186/12OmNzsJ7Jg", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030640", "title": "Silhouette Smoothing for Real-Time Rendering of Mesh Surfaces", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030640/13rRUwI5TQP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1994/02/i0150", "title": "The Visual Hull Concept for Silhouette-Based Image Understanding", "doi": null, "abstractUrl": "/journal/tp/1994/02/i0150/13rRUwInvzl", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2009/04/mcg2009040081", "title": "Non-photorealistic Rendering: Unleashing the Artist's Imagination [Graphically Speaking]", "doi": null, "abstractUrl": "/magazine/cg/2009/04/mcg2009040081/13rRUzp02qq", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzlUKpz", "title": "2006 IEEE International Conference on Multimedia and Expo", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNvA1hkx", "doi": "10.1109/ICME.2006.262704", "title": "Coarse-to-Fine Pedestrian Localization and Silhouette Extraction for the Gait Challenge Data Sets", "normalizedTitle": "Coarse-to-Fine Pedestrian Localization and Silhouette Extraction for the Gait Challenge Data Sets", "abstract": "This paper presents a localized coarse-to-fine algorithm for efficient and accurate pedestrian localization and silhouette extraction for the Gait Challenge data sets. The coarse detection phase is simple and fast. It locates the target quickly based on temporal differences and some knowledge on the human target. Based on this coarse detection, the fine dectection phase applies a robust background subtraction algorithm to the coarse target regions and the detection obtained is further processed to produce the final results. This algorithm has been tested on 285 outdoor sequences from the Gait Challenge data sets, with wide variety of capture conditions. The pedestrian targets are localized very well and silhouettes extracted resemble the manually labeled silhouettes closely.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a localized coarse-to-fine algorithm for efficient and accurate pedestrian localization and silhouette extraction for the Gait Challenge data sets. The coarse detection phase is simple and fast. It locates the target quickly based on temporal differences and some knowledge on the human target. Based on this coarse detection, the fine dectection phase applies a robust background subtraction algorithm to the coarse target regions and the detection obtained is further processed to produce the final results. 
This algorithm has been tested on 285 outdoor sequences from the Gait Challenge data sets, with wide variety of capture conditions. The pedestrian targets are localized very well and silhouettes extracted resemble the manually labeled silhouettes closely.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a localized coarse-to-fine algorithm for efficient and accurate pedestrian localization and silhouette extraction for the Gait Challenge data sets. The coarse detection phase is simple and fast. It locates the target quickly based on temporal differences and some knowledge on the human target. Based on this coarse detection, the fine dectection phase applies a robust background subtraction algorithm to the coarse target regions and the detection obtained is further processed to produce the final results. This algorithm has been tested on 285 outdoor sequences from the Gait Challenge data sets, with wide variety of capture conditions. The pedestrian targets are localized very well and silhouettes extracted resemble the manually labeled silhouettes closely.", "fno": "04036773", "keywords": [], "authors": [ { "affiliation": "The Edward S. Rogers Sr. Department of Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada. haiping@dsp.toronto.edu", "fullName": "Haiping Lu", "givenName": "Haiping", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "The Edward S. Rogers Sr. Department of Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada. kostas@dsp.toronto.edu", "fullName": "K.N. Plataniotis", "givenName": "K.N.", "surname": "Plataniotis", "__typename": "ArticleAuthorType" }, { "affiliation": "The Edward S. Rogers Sr. Department of Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada. anv@dsp.toronto.edu", "fullName": "A.N. 
Venetsanopoulos", "givenName": "A.N.", "surname": "Venetsanopoulos", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-07-01T00:00:00", "pubType": "proceedings", "pages": "1009-1012", "year": "2006", "issn": null, "isbn": "1-4244-0366-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04036772", "articleId": "12OmNvjQ8Sb", "__typename": "AdjacentArticleType" }, "next": { "fno": "04036775", "articleId": "12OmNAolGRy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aipr/2012/4558/0/06528205", "title": "Novel features for silhouette based gait recognition systems", "doi": null, "abstractUrl": "/proceedings-article/aipr/2012/06528205/12OmNA1VnuX", "parentPublication": { "id": "proceedings/aipr/2012/4558/0", "title": "2012 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2002/1602/0/16020366", "title": "Silhouette-Based Human Identification from Body Shape and Gait", "doi": null, "abstractUrl": "/proceedings-article/fg/2002/16020366/12OmNANTAwl", "parentPublication": { "id": "proceedings/fg/2002/1602/0", "title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2003/1950/1/195010663", "title": "Learning Pedestrian Models for Silhouette Refinement", "doi": null, "abstractUrl": "/proceedings-article/iccv/2003/195010663/12OmNAsBFHX", "parentPublication": { "id": "proceedings/iccv/2003/1950/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2010/6984/0/05540144", "title": "Silhouette transformation based on walking speed for gait identification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05540144/12OmNB8CiYw", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315233", "title": "Studies on silhouette quality and gait recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315233/12OmNqyDjl0", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/4/212840211", "title": "Simplest Representation Yet for Gait Recognition: Averaged Silhouette", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212840211/12OmNvzJFWf", "parentPublication": { "id": "proceedings/icpr/2004/2128/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsit/2009/4519/0/05234506", "title": "Robust post-processing strategy for gait silhouette", "doi": null, "abstractUrl": "/proceedings-article/iccsit/2009/05234506/12OmNz2C1z3", "parentPublication": { "id": "proceedings/iccsit/2009/4519/0", "title": "Computer Science and Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/3/252130473", "title": "Abnormal Walking Gait Analysis Using Silhouette-Masked Flow Histograms", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252130473/12OmNzCF4Uq", "parentPublication": { "id": 
"proceedings/icpr/2006/2521/3", "title": "2006 18th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imvip/2011/0230/0/06167849", "title": "Analysis of Most Important Parts for Silhouette-Based Gait Recognition", "doi": null, "abstractUrl": "/proceedings-article/imvip/2011/06167849/12OmNzxPTO8", "parentPublication": { "id": "proceedings/imvip/2011/0230/0", "title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgX3", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNyoiZ7z", "doi": "10.1109/CVPRW.2009.5204341", "title": "Shadow multiplexing for real-time silhouette extraction", "normalizedTitle": "Shadow multiplexing for real-time silhouette extraction", "abstract": "In this work we propose a real-time implementation for efficient extraction of multi-viewpoint silhouettes using a single camera. The method is based on our previously presented proof-of-concept shadow multiplexing method. We replace the cameras of a typical multi-camera setup with colored light sources and capture the multiplexed shadows. Because we only use a single camera, our setup is much cheaper than a classical setup, no camera synchronization is required, and less data has to be captured and processed. In addition, silhouette extraction is simple as we are segmenting the shadows instead of the texture of objects and background. Demultiplexing runs at 40 fps on current graphics hardware. Therefore this technique is suitable for real-time applications such as collision detection. We evaluate our method on both a real and a virtual setup, and show that our technique works for a large variety of objects and materials.", "abstracts": [ { "abstractType": "Regular", "content": "In this work we propose a real-time implementation for efficient extraction of multi-viewpoint silhouettes using a single camera. The method is based on our previously presented proof-of-concept shadow multiplexing method. We replace the cameras of a typical multi-camera setup with colored light sources and capture the multiplexed shadows. 
Because we only use a single camera, our setup is much cheaper than a classical setup, no camera synchronization is required, and less data has to be captured and processed. In addition, silhouette extraction is simple as we are segmenting the shadows instead of the texture of objects and background. Demultiplexing runs at 40 fps on current graphics hardware. Therefore this technique is suitable for real-time applications such as collision detection. We evaluate our method on both a real and a virtual setup, and show that our technique works for a large variety of objects and materials.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this work we propose a real-time implementation for efficient extraction of multi-viewpoint silhouettes using a single camera. The method is based on our previously presented proof-of-concept shadow multiplexing method. We replace the cameras of a typical multi-camera setup with colored light sources and capture the multiplexed shadows. Because we only use a single camera, our setup is much cheaper than a classical setup, no camera synchronization is required, and less data has to be captured and processed. In addition, silhouette extraction is simple as we are segmenting the shadows instead of the texture of objects and background. Demultiplexing runs at 40 fps on current graphics hardware. Therefore this technique is suitable for real-time applications such as collision detection. 
We evaluate our method on both a real and a virtual setup, and show that our technique works for a large variety of objects and materials.", "fno": "05204341", "keywords": [ "Feature Extraction", "Image Colour Analysis", "Image Sensors", "Virtual Reality", "Proof Of Concept Shadow Multiplexing Method", "Real Time Silhouette Extraction", "Single Camera", "Colored Light Sources", "Graphics Hardware", "Collision Detection", "Virtual Reality Applications", "Light Sources", "Image Reconstruction", "Layout", "Data Mining", "Digital Cameras", "Demultiplexing", "Graphics", "Hardware", "Rendering Computer Graphics", "Lenses" ], "authors": [ { "affiliation": "Hasselt University - tUL - IBBT, Expertise Centre for Digital Media, Belgium", "fullName": "Tom Cuypers", "givenName": "Tom", "surname": "Cuypers", "__typename": "ArticleAuthorType" }, { "affiliation": "Hasselt University - tUL - IBBT, Expertise Centre for Digital Media, Belgium", "fullName": "Yannick Francken", "givenName": "Yannick", "surname": "Francken", "__typename": "ArticleAuthorType" }, { "affiliation": "Hasselt University - tUL - IBBT, Expertise Centre for Digital Media, Belgium", "fullName": "Johannes Taelman", "givenName": "Johannes", "surname": "Taelman", "__typename": "ArticleAuthorType" }, { "affiliation": "Hasselt University - tUL - IBBT, Expertise Centre for Digital Media, Belgium", "fullName": "Philippe Bekaert", "givenName": "Philippe", "surname": "Bekaert", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-06-01T00:00:00", "pubType": "proceedings", "pages": "61-68", "year": "2009", "issn": "2160-7508", "isbn": "978-1-4244-3994-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05204325", "articleId": "12OmNvA1hja", "__typename": "AdjacentArticleType" }, "next": { "fno": "05204315", "articleId": 
"12OmNzl3WTL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2001/1143/1/00937517", "title": "Shadow carving", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/00937517/12OmNBSBk9I", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2006/2606/0/26060372", "title": "GPU Based Real-time Shadow Research", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2006/26060372/12OmNCcbE8T", "parentPublication": { "id": "proceedings/cgiv/2006/2606/0", "title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206614", "title": "A projector-camera setup for geometry-invariant frequency demultiplexing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206614/12OmNvoWV1H", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/1/00413374", "title": "Active shape and depth extraction from shadow images", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413374/12OmNweBUM3", "parentPublication": { "id": "proceedings/icip/1994/6952/3", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/3/07295106", "title": "Shape from silhouette in space, time and light domains", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07295106/12OmNxcMSbE", "parentPublication": { "id": 
"proceedings/visapp/2014/8133/2", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2017/5738/0/08031605", "title": "Implicit Sphere Shadow Maps", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2017/08031605/12OmNxcMShN", "parentPublication": { "id": "proceedings/pacificvis/2017/5738/0", "title": "2017 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2017/2219/0/2219a238", "title": "Euclidean Distance Transform Soft Shadow Mapping", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2017/2219a238/12OmNxw5BsX", "parentPublication": { "id": "proceedings/sibgrapi/2017/2219/0", "title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2014/4337/0/4337a378", "title": "3D Reconstruction by Fusioning Shadow and Silhouette Information", "doi": null, "abstractUrl": "/proceedings-article/crv/2014/4337a378/12OmNzfXawe", "parentPublication": { "id": "proceedings/crv/2014/4337/0", "title": "2014 Canadian Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223128", "title": "Shadow identification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223128/12OmNzmclXa", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/06/07076609", "title": "More Efficient Virtual Shadow Maps for Many Lights", "doi": null, "abstractUrl": 
"/journal/tg/2015/06/07076609/13rRUyYSWsZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxH9X7L", "title": "2014 Canadian Conference on Computer and Robot Vision (CRV)", "acronym": "crv", "groupId": "1001794", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNzfXawe", "doi": "10.1109/CRV.2014.58", "title": "3D Reconstruction by Fusioning Shadow and Silhouette Information", "normalizedTitle": "3D Reconstruction by Fusioning Shadow and Silhouette Information", "abstract": "In this paper, we propose a new 3D reconstruction method using mainly the shadow and silhouette information of a moving object or person. This method is derived from the well-known Shape From Silhouettes (SFS) approach. A light source can be seen as a camera, which generates an image as a silhouette shadow. Based on this, we propose to replace a multicamera system of SFS by multi-infrared light sources while keeping the same procedure of Visual Hull reconstruction (VH). Therefore, our system consists of infrared light sources and one infrared camera. In this case, in addition to the object silhouette given by the camera, each light source generates an object shadow that reveals the object. Thus, as in SFS, the VH of a given object is reconstructed by intersecting the visual cones. Our method has many advantages compared to SFS and preliminary results, on synthetic and real scene images, showed that the system could be applied in several contexts.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a new 3D reconstruction method using mainly the shadow and silhouette information of a moving object or person. This method is derived from the well-known Shape From Silhouettes (SFS) approach. A light source can be seen as a camera, which generates an image as a silhouette shadow. Based on this, we propose to replace a multicamera system of SFS by multi-infrared light sources while keeping the same procedure of Visual Hull reconstruction (VH). 
Therefore, our system consists of infrared light sources and one infrared camera. In this case, in addition to the object silhouette given by the camera, each light source generates an object shadow that reveals the object. Thus, as in SFS, the VH of a given object is reconstructed by intersecting the visual cones. Our method has many advantages compared to SFS and preliminary results, on synthetic and real scene images, showed that the system could be applied in several contexts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a new 3D reconstruction method using mainly the shadow and silhouette information of a moving object or person. This method is derived from the well-known Shape From Silhouettes (SFS) approach. A light source can be seen as a camera, which generates an image as a silhouette shadow. Based on this, we propose to replace a multicamera system of SFS by multi-infrared light sources while keeping the same procedure of Visual Hull reconstruction (VH). Therefore, our system consists of infrared light sources and one infrared camera. In this case, in addition to the object silhouette given by the camera, each light source generates an object shadow that reveals the object. Thus, as in SFS, the VH of a given object is reconstructed by intersecting the visual cones. 
Our method has many advantages compared to SFS and preliminary results, on synthetic and real scene images, showed that the system could be applied in several contexts.", "fno": "4337a378", "keywords": [ "Light Sources", "Cameras", "Image Reconstruction", "Three Dimensional Displays", "Shape", "Visualization", "Ellipsoids", "Infrared Light", "Shape From Silhouettes", "Visual Hull", "Shadow", "3 D Reconstruction" ], "authors": [ { "affiliation": null, "fullName": "Rafik Gouiaa", "givenName": "Rafik", "surname": "Gouiaa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jean Meunier", "givenName": "Jean", "surname": "Meunier", "__typename": "ArticleAuthorType" } ], "idPrefix": "crv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-05-01T00:00:00", "pubType": "proceedings", "pages": "378-384", "year": "2014", "issn": null, "isbn": "978-1-4799-4337-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4337a371", "articleId": "12OmNz5JBPE", "__typename": "AdjacentArticleType" }, "next": { "fno": "4337a385", "articleId": "12OmNyq0zMG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2006/2606/0/26060372", "title": "GPU Based Real-time Shadow Research", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2006/26060372/12OmNCcbE8T", "parentPublication": { "id": "proceedings/cgiv/2006/2606/0", "title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmdcm/2011/4413/0/4413a270", "title": "New Silhouette Detection Algorithm to Create Real-Time Volume Shadow", "doi": null, "abstractUrl": "/proceedings-article/dmdcm/2011/4413a270/12OmNCdk2DZ", "parentPublication": { "id": 
"proceedings/dmdcm/2011/4413/0", "title": "Digital Media and Digital Content Management, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2007/2945/0/29450524", "title": "Simplified Shadow Volumes using Silhouette Level-of-Detail", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2007/29450524/12OmNrkT7Ha", "parentPublication": { "id": "proceedings/iccsa/2007/2945/0", "title": "2007 International Conference on Computational Science and its Applications (ICCSA 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2006/2602/0/26020585", "title": "GPU Based Real-time Shadow Research in Large Ship-handling Simulator", "doi": null, "abstractUrl": "/proceedings-article/iv/2006/26020585/12OmNxeutbR", "parentPublication": { "id": "proceedings/iv/2006/2602/0", "title": "Tenth International Conference on Information Visualisation (IV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2017/2219/0/2219a238", "title": "Euclidean Distance Transform Soft Shadow Mapping", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2017/2219a238/12OmNxw5BsX", "parentPublication": { "id": "proceedings/sibgrapi/2017/2219/0", "title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761016", "title": "3D reconstruction by combining shape from silhouette with stereo", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761016/12OmNyS6ROK", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204341", "title": "Shadow multiplexing for 
real-time silhouette extraction", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204341/12OmNyoiZ7z", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2014/4258/0/4258a001", "title": "3D Face Reconstruction from Video Using 3D Morphable Model and Silhouette", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2014/4258a001/12OmNz5JBYT", "parentPublication": { "id": "proceedings/sibgrapi/2014/4258/0", "title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/4353/2/05751119", "title": "The Synthesis of 3D Graphics and the Study on Various Shadow Effects", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05751119/12OmNzBOhCu", "parentPublication": { "id": "icicta/2011/4353/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800i136", "title": "ARShadowGAN: Shadow Generative Adversarial Network for Augmented Reality in Single Light Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800i136/1m3ooi3wLOU", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyNQSGO", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNzsJ7Jg", "doi": "10.1109/CVPR.2007.383161", "title": "Efficiently Determining Silhouette Consistency", "normalizedTitle": "Efficiently Determining Silhouette Consistency", "abstract": "Volume intersection is a frequently used technique to solve the Shape-From-Silhouette problem, which constructs a 3D object estimate from a set of silhouettes taken with calibrated cameras. It is natural to develop an efficient algorithm to determine the consistency of a set of silhouettes before performing time-consuming reconstruction, so that inaccurate silhouettes can be omitted. In this paper we first present a fast algorithm to determine the consistency of three silhouettes from known (but arbitrary) viewing directions, assuming the projection is scaled orthographic. The temporal complexity of the algorithm is linear in the number of points of the silhouette boundaries. We further prove that a set of more than three convex silhouettes are consistent if and only if any three of them are consistent. Another possible application of our approach is to determine the miscalibrated cameras in a large camera system. A consistent subset of cameras can be determined on the fly and miscalibrated cameras can also be recalibrated at a coarse scale. Real and synthesized data are used to demonstrate our results.", "abstracts": [ { "abstractType": "Regular", "content": "Volume intersection is a frequently used technique to solve the Shape-From-Silhouette problem, which constructs a 3D object estimate from a set of silhouettes taken with calibrated cameras. 
It is natural to develop an efficient algorithm to determine the consistency of a set of silhouettes before performing time-consuming reconstruction, so that inaccurate silhouettes can be omitted. In this paper we first present a fast algorithm to determine the consistency of three silhouettes from known (but arbitrary) viewing directions, assuming the projection is scaled orthographic. The temporal complexity of the algorithm is linear in the number of points of the silhouette boundaries. We further prove that a set of more than three convex silhouettes are consistent if and only if any three of them are consistent. Another possible application of our approach is to determine the miscalibrated cameras in a large camera system. A consistent subset of cameras can be determined on the fly and miscalibrated cameras can also be recalibrated at a coarse scale. Real and synthesized data are used to demonstrate our results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Volume intersection is a frequently used technique to solve the Shape-From-Silhouette problem, which constructs a 3D object estimate from a set of silhouettes taken with calibrated cameras. It is natural to develop an efficient algorithm to determine the consistency of a set of silhouettes before performing time-consuming reconstruction, so that inaccurate silhouettes can be omitted. In this paper we first present a fast algorithm to determine the consistency of three silhouettes from known (but arbitrary) viewing directions, assuming the projection is scaled orthographic. The temporal complexity of the algorithm is linear in the number of points of the silhouette boundaries. We further prove that a set of more than three convex silhouettes are consistent if and only if any three of them are consistent. Another possible application of our approach is to determine the miscalibrated cameras in a large camera system. 
A consistent subset of cameras can be determined on the fly and miscalibrated cameras can also be recalibrated at a coarse scale. Real and synthesized data are used to demonstrate our results.", "fno": "04270186", "keywords": [], "authors": [ { "affiliation": "Institute for Advanced Computer Studies, University of Maryland, College Park, MD 20742. liyi@umiacs", "fullName": "Li Yi", "givenName": null, "surname": "Li Yi", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute for Advanced Computer Studies, University of Maryland, College Park, MD 20742. djacobs@umi", "fullName": "David W. Jacobs", "givenName": "David W.", "surname": "Jacobs", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-06-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2007", "issn": null, "isbn": "1-4244-1179-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04270185", "articleId": "12OmNzTppy3", "__typename": "AdjacentArticleType" }, "next": { "fno": "04270187", "articleId": "12OmNwEJ0CO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2015/6964/0/07299010", "title": "Spherical embedding of inlier silhouette dissimilarities", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07299010/12OmNAKcNMw", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/07780806", "title": "Joint Multiview Segmentation and Localization of RGB-D Images Using Depth-Induced Silhouette Consistency", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/07780806/12OmNBQTJin", "parentPublication": 
{ "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2006/2825/0/282500397", "title": "Visual Shapes of Silhouette Sets", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/282500397/12OmNCxtyM3", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315233", "title": "Studies on silhouette quality and gait recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315233/12OmNqyDjl0", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030640", "title": "Silhouette Smoothing for Real-Time Rendering of Mesh Surfaces", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030640/13rRUwI5TQP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/02/i0343", "title": "Silhouette Coherence for Camera Calibration under Circular Motion", "doi": null, "abstractUrl": "/journal/tp/2007/02/i0343/13rRUwbaqMD", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/06/ttp2011061161", "title": "Multiview Stereo and Silhouette Consistency via Convex Functionals over Convex Domains", "doi": null, "abstractUrl": "/journal/tp/2011/06/ttp2011061161/13rRUxAASXn", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300e475", "title": "SiCloPe: Silhouette-Based Clothed People", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300e475/1gysaRMe7a8", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tmhi3ly74c", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tmhup6tRV6", "doi": "10.1109/ICPR48806.2021.9412708", "title": "Silhouette Body Measurement Benchmarks", "normalizedTitle": "Silhouette Body Measurement Benchmarks", "abstract": "Anthropometric body measurements are important for industrial design, garment fitting, medical diagnosis and ergonomics. A number of methods have been proposed to estimate the body measurements from images, but progress has been slow due to the lack of realistic and publicly available datasets. The existing works train and test on silhouettes of 3D body meshes obtained by fitting a human body model to the commercial CAESAR scans. In this work, we introduce the BODY-fit dataset that contains fitted meshes of 2,675 female and 1,474 male 3D body scans. We unify evaluation on the CAESAR-fit and BODY-fit datasets by computing body measurements from geodesic surface paths as the ground truth and by generating two-view silhouette images. We also introduce BODY-rgb - a realistic dataset of 86 male and 108 female subjects captured with an RGB camera and manually tape measured ground truth. We propose a simple yet effective deep CNN architecture as a baseline method which obtains competitive accuracy on the three datasets.", "abstracts": [ { "abstractType": "Regular", "content": "Anthropometric body measurements are important for industrial design, garment fitting, medical diagnosis and ergonomics. A number of methods have been proposed to estimate the body measurements from images, but progress has been slow due to the lack of realistic and publicly available datasets. The existing works train and test on silhouettes of 3D body meshes obtained by fitting a human body model to the commercial CAESAR scans. 
In this work, we introduce the BODY-fit dataset that contains fitted meshes of 2,675 female and 1,474 male 3D body scans. We unify evaluation on the CAESAR-fit and BODY-fit datasets by computing body measurements from geodesic surface paths as the ground truth and by generating two-view silhouette images. We also introduce BODY-rgb - a realistic dataset of 86 male and 108 female subjects captured with an RGB camera and manually tape measured ground truth. We propose a simple yet effective deep CNN architecture as a baseline method which obtains competitive accuracy on the three datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Anthropometric body measurements are important for industrial design, garment fitting, medical diagnosis and ergonomics. A number of methods have been proposed to estimate the body measurements from images, but progress has been slow due to the lack of realistic and publicly available datasets. The existing works train and test on silhouettes of 3D body meshes obtained by fitting a human body model to the commercial CAESAR scans. In this work, we introduce the BODY-fit dataset that contains fitted meshes of 2,675 female and 1,474 male 3D body scans. We unify evaluation on the CAESAR-fit and BODY-fit datasets by computing body measurements from geodesic surface paths as the ground truth and by generating two-view silhouette images. We also introduce BODY-rgb - a realistic dataset of 86 male and 108 female subjects captured with an RGB camera and manually tape measured ground truth. 
We propose a simple yet effective deep CNN architecture as a baseline method which obtains competitive accuracy on the three datasets.", "fno": "09412708", "keywords": [ "Anthropometry", "Biomedical Measurement", "Clothing", "Clothing Industry", "Convolutional Neural Nets", "Differential Geometry", "Ergonomics", "Feature Extraction", "Image Sensors", "Silhouette Body Measurement Benchmarks", "Anthropometric Body Measurements", "Garment Fitting", "Medical Diagnosis", "Ergonomics", "Realistic Datasets", "3 D Body Meshes", "Human Body Model", "Commercial CAESAR Scans", "BODY Fit Dataset", "Two View Silhouette Images", "BODY Rgb", "Realistic Dataset", "Measured Ground Truth", "Industrial Design", "CAESAR Fit Dataset", "Solid Modeling", "Three Dimensional Displays", "Ergonomics", "Fitting", "Computer Architecture", "Benchmark Testing", "Surface Fitting" ], "authors": [ { "affiliation": "Tampere University,Computing Sciences,Finland", "fullName": "Song Yan", "givenName": "Song", "surname": "Yan", "__typename": "ArticleAuthorType" }, { "affiliation": "NOMO Technologies Ltd,Espoo,Finland", "fullName": "Johan Wirta", "givenName": "Johan", "surname": "Wirta", "__typename": "ArticleAuthorType" }, { "affiliation": "Tampere University,Computing Sciences,Finland", "fullName": "Joni-Kristian Kämäräinen", "givenName": "Joni-Kristian", "surname": "Kämäräinen", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "7804-7809", "year": "2021", "issn": "1051-4651", "isbn": "978-1-7281-8808-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09411976", "articleId": "1tmhzKLUyUU", "__typename": "AdjacentArticleType" }, "next": { "fno": "09413114", "articleId": "1tmhPnfQnoA", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2014/4985/0/06836115", "title": "Model-based anthropometry: Predicting measurements from 3D human scans in multiple poses", "doi": null, "abstractUrl": "/proceedings-article/wacv/2014/06836115/12OmNAnuTvV", "parentPublication": { "id": "proceedings/wacv/2014/4985/0", "title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2006/2825/0/282500389", "title": "3D Skeleton-Based Body Pose Recovery", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/282500389/12OmNBLdKJ3", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccairo/2017/6536/0/6536a045", "title": "CAD Modelling of Human Body for Robotics Applications", "doi": null, "abstractUrl": "/proceedings-article/iccairo/2017/6536a045/12OmNBTs7q4", "parentPublication": { "id": "proceedings/iccairo/2017/6536/0", "title": "2017 International Conference on Control, Artificial Intelligence, Robotics & Optimization (ICCAIRO)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981821", "title": "3D Human pose and shape estimation from multi-view imagery", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981821/12OmNwtn3o5", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dim/2005/2327/0/23270335", "title": "Extracting Main Modes of Human Body Shape Variation from 3-D Anthropometric Data", "doi": null, "abstractUrl": 
"/proceedings-article/3dim/2005/23270335/12OmNxwENJD", "parentPublication": { "id": "proceedings/3dim/2005/2327/0", "title": "3D Digital Imaging and Modeling, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a219", "title": "Human Body Measurement Estimation with Adversarial Augmentation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a219/1KYsrtwlRrG", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a279", "title": "Towards Accurate 3D Human Body Reconstruction from Silhouettes", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a279/1ezRD3vu1xu", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0967", "title": "Expressive Body Capture: 3D Hands, Face, and Body From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0967/1gyrtpLBnSE", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tcs/2021/2910/0/291000a559", "title": "Research of Effects of Intelligent Rope Skipping on BMI or Health-related Physical Fitness among Overweight Middle School Students and the Correlation", "doi": null, "abstractUrl": "/proceedings-article/tcs/2021/291000a559/1wRIbWcBYoE", "parentPublication": { "id": "proceedings/tcs/2021/2910/0", "title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a689", "title": "KAMA: 3D Keypoint Aware Body Mesh Articulation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a689/1zWEaHHKCkM", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx7ouTY", "title": "Artificial Intelligence and Computational Intelligence, International Conference on", "acronym": "aici", "groupId": "1003069", "volume": "3", "displayVolume": "3", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNAXglTU", "doi": "10.1109/AICI.2010.307", "title": "Dynamic Mesh Optimization with Curvature Dependent Subdivision for Polygonized Implicit Surfaces with Sharp Features", "normalizedTitle": "Dynamic Mesh Optimization with Curvature Dependent Subdivision for Polygonized Implicit Surfaces with Sharp Features", "abstract": "Converting from implicit surfaces into polygons and further optimization are a preprocessing step in rendering implicit surfaces for visualization and other uses. This paper describes a simple method for accurate polygonization of implicit surfaces with sharp features. First, an initial coarse mesh is generated by traditional Marching Cubes (MC) method, Then the mesh is used to approximate the implicit surface with simultaneous control of mesh vertex positions, regularity and normals. The refined mesh finally converges to a limit mesh which represents a high quality approximation of the implicit surface. To reconstruct small surface features, the optimization process combines with a curvature dependent mesh adaptive subdivision. For analyzing how close the refined mesh approximates the implicit surface, two error metrics are applied. One measures the deviations of the mesh vertices from the implicit surface, while the other measures the deviations of the mesh normals from the implicit surface.", "abstracts": [ { "abstractType": "Regular", "content": "Converting from implicit surfaces into polygons and further optimization are a preprocessing step in rendering implicit surfaces for visualization and other uses. This paper describes a simple method for accurate polygonization of implicit surfaces with sharp features. 
First, an initial coarse mesh is generated by traditional Marching Cubes (MC) method, Then the mesh is used to approximate the implicit surface with simultaneous control of mesh vertex positions, regularity and normals. The refined mesh finally converges to a limit mesh which represents a high quality approximation of the implicit surface. To reconstruct small surface features, the optimization process combines with a curvature dependent mesh adaptive subdivision. For analyzing how close the refined mesh approximates the implicit surface, two error metrics are applied. One measures the deviations of the mesh vertices from the implicit surface, while the other measures the deviations of the mesh normals from the implicit surface.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Converting from implicit surfaces into polygons and further optimization are a preprocessing step in rendering implicit surfaces for visualization and other uses. This paper describes a simple method for accurate polygonization of implicit surfaces with sharp features. First, an initial coarse mesh is generated by traditional Marching Cubes (MC) method, Then the mesh is used to approximate the implicit surface with simultaneous control of mesh vertex positions, regularity and normals. The refined mesh finally converges to a limit mesh which represents a high quality approximation of the implicit surface. To reconstruct small surface features, the optimization process combines with a curvature dependent mesh adaptive subdivision. For analyzing how close the refined mesh approximates the implicit surface, two error metrics are applied. 
One measures the deviations of the mesh vertices from the implicit surface, while the other measures the deviations of the mesh normals from the implicit surface.", "fno": "4225c331", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Mingqiang Wei", "givenName": "Mingqiang", "surname": "Wei", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianhuang Wu", "givenName": "Jianhuang", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mingyong Pang", "givenName": "Mingyong", "surname": "Pang", "__typename": "ArticleAuthorType" } ], "idPrefix": "aici", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "331-335", "year": "2010", "issn": null, "isbn": "978-0-7695-4225-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4225c322", "articleId": "12OmNxG1yTL", "__typename": "AdjacentArticleType" }, "next": { "fno": "4225c336", "articleId": "12OmNzxyiHj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/smi/2001/0853/0/08530062", "title": "Implicit Surfaces that Interpolate", "doi": null, "abstractUrl": "/proceedings-article/smi/2001/08530062/12OmNAZfxIC", "parentPublication": { "id": "proceedings/smi/2001/0853/0", "title": "Shape Modeling and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270140", "title": "Sharp Features on Multiresolution Subdivision Surfaces", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270140/12OmNBf94XV", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/smi/2001/0853/0/08530074", "title": "Dynamic Meshes for Accurate Polygonization of Implicit Surfaces with Sharp Features", "doi": null, "abstractUrl": "/proceedings-article/smi/2001/08530074/12OmNrJiCSr", "parentPublication": { "id": "proceedings/smi/2001/0853/0", "title": "Shape Modeling and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2006/2686/0/26860205", "title": "Robust adaptive meshes for implicit surfaces", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2006/26860205/12OmNxb5hwt", "parentPublication": { "id": "proceedings/sibgrapi/2006/2686/0", "title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2005/2379/0/23790124", "title": "Robust Particle Systems for Curvature Dependent Sampling of Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/smi/2005/23790124/12OmNyGbIbB", "parentPublication": { "id": "proceedings/smi/2005/2379/0", "title": "Proceedings. 
International Conference on Shape Modeling and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iptc/2010/4196/0/4196a220", "title": "Parallel Polygonization of Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/iptc/2010/4196a220/12OmNyQph0Z", "parentPublication": { "id": "proceedings/iptc/2010/4196/0", "title": "Intelligence Information Processing and Trusted Computing, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-cg/2005/2473/0/24730133", "title": "High Quality Triangulation of Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cad-cg/2005/24730133/12OmNyUWR09", "parentPublication": { "id": "proceedings/cad-cg/2005/2473/0", "title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2004/2227/0/22270266", "title": "Curvature Dependent Polygonization of Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2004/22270266/12OmNyv7mut", "parentPublication": { "id": "proceedings/sibgrapi/2004/2227/0", "title": "Proceedings. 
17th Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2006/2754/0/27540355", "title": "Subdivision Interpolating Polygonization of Implicit Surfaces with Normal Meshes", "doi": null, "abstractUrl": "/proceedings-article/icat/2006/27540355/12OmNzBwGxn", "parentPublication": { "id": "proceedings/icat/2006/2754/0", "title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/04/v0346", "title": "Robust Creation of Implicit Surfaces from Polygonal Meshes", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0346/13rRUxD9h4Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwt5sgJ", "title": "CVPR 2011", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNvBrgHF", "doi": "10.1109/CVPR.2011.5995576", "title": "Topology-adaptive multi-view photometric stereo", "normalizedTitle": "Topology-adaptive multi-view photometric stereo", "abstract": "In this paper, we present a novel technique that enables capturing of detailed 3D models from flash photographs integrating shading and silhouette cues. Our main contribution is an optimization framework which not only captures subtle surface details but also handles changes in topology. To incorporate normals estimated from shading, we employ a mesh-based deformable model using deformation gradient. This method is capable of manipulating precise geometry and, in fact, it outperforms previous methods in terms of both accuracy and efficiency. To adapt the topology of the mesh, we convert the mesh into an implicit surface representation and then back to a mesh representation. This simple procedure removes self-intersecting regions of the mesh and solves the topology problem effectively. In addition to the algorithm, we introduce a hand-held setup to achieve multi-view photometric stereo. The key idea is to acquire flash photographs from a wide range of positions in order to obtain a sufficient lighting variation even with a standard flash unit attached to the camera. Experimental results showed that our method can capture detailed shapes of various objects and cope with topology changes well.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a novel technique that enables capturing of detailed 3D models from flash photographs integrating shading and silhouette cues. Our main contribution is an optimization framework which not only captures subtle surface details but also handles changes in topology. 
To incorporate normals estimated from shading, we employ a mesh-based deformable model using deformation gradient. This method is capable of manipulating precise geometry and, in fact, it outperforms previous methods in terms of both accuracy and efficiency. To adapt the topology of the mesh, we convert the mesh into an implicit surface representation and then back to a mesh representation. This simple procedure removes self-intersecting regions of the mesh and solves the topology problem effectively. In addition to the algorithm, we introduce a hand-held setup to achieve multi-view photometric stereo. The key idea is to acquire flash photographs from a wide range of positions in order to obtain a sufficient lighting variation even with a standard flash unit attached to the camera. Experimental results showed that our method can capture detailed shapes of various objects and cope with topology changes well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a novel technique that enables capturing of detailed 3D models from flash photographs integrating shading and silhouette cues. Our main contribution is an optimization framework which not only captures subtle surface details but also handles changes in topology. To incorporate normals estimated from shading, we employ a mesh-based deformable model using deformation gradient. This method is capable of manipulating precise geometry and, in fact, it outperforms previous methods in terms of both accuracy and efficiency. To adapt the topology of the mesh, we convert the mesh into an implicit surface representation and then back to a mesh representation. This simple procedure removes self-intersecting regions of the mesh and solves the topology problem effectively. In addition to the algorithm, we introduce a hand-held setup to achieve multi-view photometric stereo. 
The key idea is to acquire flash photographs from a wide range of positions in order to obtain a sufficient lighting variation even with a standard flash unit attached to the camera. Experimental results showed that our method can capture detailed shapes of various objects and cope with topology changes well.", "fno": "05995576", "keywords": [ "Mesh Representation", "Topology Adaptive Multiview Photometric Stereo", "Flash Photographs", "Shading Cue", "Silhouette Cue", "Optimization Framework", "Mesh Based Deformable Model", "Deformation Gradient", "Geometry", "Surface Representation" ], "authors": [ { "affiliation": "Grad. Sch. of Sci. & Technol., Keio Univ., Yokohama, Japan", "fullName": "Y. Yoshiyasu", "givenName": "Y.", "surname": "Yoshiyasu", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Mech. Eng., Keio Univ., Yokohama, Japan", "fullName": "N. Yamazaki", "givenName": "N.", "surname": "Yamazaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-06-01T00:00:00", "pubType": "proceedings", "pages": "1001-1008", "year": "2011", "issn": null, "isbn": "978-1-4577-0394-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05995575", "articleId": "12OmNxwWoIO", "__typename": "AdjacentArticleType" }, "next": { "fno": "05995577", "articleId": "12OmNBqdr2o", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2009/4442/0/05457689", "title": "Detailed body shapes from flash photographs", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457689/12OmNBDQbjn", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ncm/2008/3322/2/3322b412", "title": "Distributed Web-Topology Formation with Directional Antenna in Mesh Environment", "doi": null, "abstractUrl": "/proceedings-article/ncm/2008/3322b412/12OmNBr4eLT", "parentPublication": { "id": "proceedings/ncm/2008/3322/2", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840b161", "title": "Multiview Photometric Stereo Using Planar Mesh Parameterization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840b161/12OmNrAv3Cy", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1996/7518/0/75180038", "title": "Improved Specular Highlights With Adaptive Shading", "doi": null, "abstractUrl": "/proceedings-article/cgi/1996/75180038/12OmNwBT1ig", "parentPublication": { "id": "proceedings/cgi/1996/7518/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sensorcomm/2008/3330/0/3330a758", "title": "A Non-TPC Based Enhanced Topology Control Process for Multi-radio Wireless Mesh Networks", "doi": null, "abstractUrl": "/proceedings-article/sensorcomm/2008/3330a758/12OmNxEjY0d", "parentPublication": { "id": "proceedings/sensorcomm/2008/3330/0", "title": "Sensor Technologies and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2008/2702/0/04625602", "title": "Mesh Topology Viewer (MTV): an SVG-based interactive mesh network topology visualization tool", "doi": null, "abstractUrl": 
"/proceedings-article/iscc/2008/04625602/12OmNxecRZ8", "parentPublication": { "id": "proceedings/iscc/2008/2702/0", "title": "2008 IEEE Symposium on Computers and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csie/2009/3507/1/3507a698", "title": "A Topology Preserved Mesh Simplification Algorithm", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507a698/12OmNyFU73Z", "parentPublication": { "id": "csie/2009/3507/1", "title": "Computer Science and Information Engineering, World Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscv/1995/7190/0/71900419", "title": "Reduction of rank-reduced orientation-from-color problem with many unknown lights to two-image known-illuminant photometric stereo", "doi": null, "abstractUrl": "/proceedings-article/iscv/1995/71900419/12OmNyGtjdd", "parentPublication": { "id": "proceedings/iscv/1995/7190/0", "title": "Computer Vision, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336c971", "title": "Adaptive Mesh Simplification Using Vertex Clustering with Topology Preserving", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336c971/12OmNyRg4mT", "parentPublication": { "id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/08/07565643", "title": "Robust Multiview Photometric Stereo Using Planar Mesh Parameterization", "doi": null, "abstractUrl": "/journal/tp/2017/08/07565643/13rRUxAAT2w", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBKW9zi", "title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing", "acronym": "sibgrapi", "groupId": "1000131", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNxb5hwt", "doi": "10.1109/SIBGRAPI.2006.40", "title": "Robust adaptive meshes for implicit surfaces", "normalizedTitle": "Robust adaptive meshes for implicit surfaces", "abstract": "This work introduces a robust algorithm for computing good polygonal approximations of implicit surfaces, where robustness entails recovering the exact topology of the implicit surface. Furthermore, the approximate triangle mesh adapts to the geometry and to the topology of the real implicit surface. This method generates an octree subdivided according to the interval evaluation of the implicit function in order to guarantee the robustness, and to the interval automatic differentiation in order to adapt the octree to the geometry of the implicit surface. The triangle mesh is then generated from that octree through an enhanced dual marching.", "abstracts": [ { "abstractType": "Regular", "content": "This work introduces a robust algorithm for computing good polygonal approximations of implicit surfaces, where robustness entails recovering the exact topology of the implicit surface. Furthermore, the approximate triangle mesh adapts to the geometry and to the topology of the real implicit surface. This method generates an octree subdivided according to the interval evaluation of the implicit function in order to guarantee the robustness, and to the interval automatic differentiation in order to adapt the octree to the geometry of the implicit surface. 
The triangle mesh is then generated from that octree through an enhanced dual marching.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This work introduces a robust algorithm for computing good polygonal approximations of implicit surfaces, where robustness entails recovering the exact topology of the implicit surface. Furthermore, the approximate triangle mesh adapts to the geometry and to the topology of the real implicit surface. This method generates an octree subdivided according to the interval evaluation of the implicit function in order to guarantee the robustness, and to the interval automatic differentiation in order to adapt the octree to the geometry of the implicit surface. The triangle mesh is then generated from that octree through an enhanced dual marching.", "fno": "26860205", "keywords": [ "Implicit Surface", "Dual Marching Cubes", "Robust Algorithms", "Geometric Modelling" ], "authors": [ { "affiliation": "PUC-Rio, Brazil", "fullName": "Afonso Paiva", "givenName": "Afonso", "surname": "Paiva", "__typename": "ArticleAuthorType" }, { "affiliation": "PUC-Rio, Brazil", "fullName": "Helio Lopes", "givenName": "Helio", "surname": "Lopes", "__typename": "ArticleAuthorType" }, { "affiliation": "PUC-Rio, Brazil", "fullName": "Thomas Lewiner", "givenName": "Thomas", "surname": "Lewiner", "__typename": "ArticleAuthorType" }, { "affiliation": "IMPA, Rio de Janeiro, Brazil", "fullName": "Luiz Henrique de Figueiredo", "givenName": "Luiz Henrique", "surname": "de Figueiredo", "__typename": "ArticleAuthorType" } ], "idPrefix": "sibgrapi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-10-01T00:00:00", "pubType": "proceedings", "pages": "205-212", "year": "2006", "issn": "1530-1834", "isbn": "0-7695-2686-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "26860194", "articleId": "12OmNxHryk0", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "26860213", "articleId": "12OmNzBOi0T", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sibgrapi/2011/4548/0/4548a072", "title": "Beam Casting Implicit Surfaces on the GPU with Interval Arithmetic", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2011/4548a072/12OmNAlvHJe", "parentPublication": { "id": "proceedings/sibgrapi/2011/4548/0", "title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2005/2392/0/23920257", "title": "Adaptive Polygonisation of Non-Manifold Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2005/23920257/12OmNAq3hTA", "parentPublication": { "id": "proceedings/cgiv/2005/2392/0", "title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2010/4166/0/4166a026", "title": "Polygonisation of Non-manifold Implicit Surfaces Using a Dual Grid and Points", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2010/4166a026/12OmNwDACjb", "parentPublication": { "id": "proceedings/cgiv/2010/4166/0", "title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iptc/2010/4196/0/4196a220", "title": "Parallel Polygonization of Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/iptc/2010/4196a220/12OmNyQph0Z", "parentPublication": { "id": "proceedings/iptc/2010/4196/0", "title": "Intelligence Information Processing and Trusted Computing, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270254", 
"title": "Haptic Sculpting of Volumetric Implicit Functions", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270254/12OmNzV70HP", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/02/ttg2012020188", "title": "Visualizing Nonmanifold and Singular Implicit Surfaces with Point Clouds", "doi": null, "abstractUrl": "/journal/tg/2012/02/ttg2012020188/13rRUwd9CLK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/05/mcg2003050070", "title": "Rendering the Intersections of Implicit Surfaces", "doi": null, "abstractUrl": "/magazine/cg/2003/05/mcg2003050070/13rRUwh80JL", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050669", "title": "Data-Parallel Octrees for Surface Reconstruction", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050669/13rRUxCitJ9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/04/v0346", "title": "Robust Creation of Implicit Surfaces from Polygonal Meshes", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0346/13rRUxD9h4Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2001/02/mcg2001020060", "title": "Curvature-Dependent Triangulation of Implicit Surfaces", "doi": null, "abstractUrl": 
"/magazine/cg/2001/02/mcg2001020060/13rRUxk89gB", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNynsbx3", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2003", "__typename": "ProceedingType" }, "article": { "id": "12OmNzlD9FC", "doi": "10.1109/VISUAL.2003.1250360", "title": "Feature-Sensitive Subdivision and Isosurface Reconstruction", "normalizedTitle": "Feature-Sensitive Subdivision and Isosurface Reconstruction", "abstract": "We present improved subdivision and isosurface reconstruction algorithms for polygonizing implicit surfaces and performing accurate geometric operations. Our improved reconstruction algorithm uses directed distance fields [Kobbelt et al. 2001] to detect multiple intersections along an edge, separates them into components and reconstructs an isosurface locally within each component using the dual contouring algorithm [Ju et al. 2002]. It can reconstruct thin features without creating handles and results in improved surface extraction from volumetric data. Our subdivision algorithm takes into account sharp features that arise from intersecting surfaces or Boolean operations and generates an adaptive grid such that each voxel has at most one sharp feature. The subdivision algorithm is combined with our improved reconstruction algorithm to compute accurate polygonization of Boolean combinations or offsets of complex primitives that faithfully reconstruct the sharp features. We have applied these algorithms to polygonize complex CAD models designed using thousands of Boolean operations on curved primitives.", "abstracts": [ { "abstractType": "Regular", "content": "We present improved subdivision and isosurface reconstruction algorithms for polygonizing implicit surfaces and performing accurate geometric operations. Our improved reconstruction algorithm uses directed distance fields [Kobbelt et al. 
2001] to detect multiple intersections along an edge, separates them into components and reconstructs an isosurface locally within each component using the dual contouring algorithm [Ju et al. 2002]. It can reconstruct thin features without creating handles and results in improved surface extraction from volumetric data. Our subdivision algorithm takes into account sharp features that arise from intersecting surfaces or Boolean operations and generates an adaptive grid such that each voxel has at most one sharp feature. The subdivision algorithm is combined with our improved reconstruction algorithm to compute accurate polygonization of Boolean combinations or offsets of complex primitives that faithfully reconstruct the sharp features. We have applied these algorithms to polygonize complex CAD models designed using thousands of Boolean operations on curved primitives.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present improved subdivision and isosurface reconstruction algorithms for polygonizing implicit surfaces and performing accurate geometric operations. Our improved reconstruction algorithm uses directed distance fields [Kobbelt et al. 2001] to detect multiple intersections along an edge, separates them into components and reconstructs an isosurface locally within each component using the dual contouring algorithm [Ju et al. 2002]. It can reconstruct thin features without creating handles and results in improved surface extraction from volumetric data. Our subdivision algorithm takes into account sharp features that arise from intersecting surfaces or Boolean operations and generates an adaptive grid such that each voxel has at most one sharp feature. The subdivision algorithm is combined with our improved reconstruction algorithm to compute accurate polygonization of Boolean combinations or offsets of complex primitives that faithfully reconstruct the sharp features. 
We have applied these algorithms to polygonize complex CAD models designed using thousands of Boolean operations on curved primitives.", "fno": "20300014", "keywords": [ "Implicit Modeling", "Boolean Operations", "Marching Cubes", "Distance Fields", "Subdivision" ], "authors": [ { "affiliation": "University of North Carolina at Chapel Hill", "fullName": "Gokul Varadhan", "givenName": "Gokul", "surname": "Varadhan", "__typename": "ArticleAuthorType" }, { "affiliation": "AT&T Research Labs", "fullName": "Shankar Krishnan", "givenName": "Shankar", "surname": "Krishnan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill", "fullName": "Young J. Kim", "givenName": "Young J.", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill", "fullName": "Dinesh Manocha", "givenName": "Dinesh", "surname": "Manocha", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2003-10-01T00:00:00", "pubType": "proceedings", "pages": "14", "year": "2003", "issn": null, "isbn": "0-7695-2030-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "20300013", "articleId": "12OmNApcukc", "__typename": "AdjacentArticleType" }, "next": { "fno": "20300015", "articleId": "12OmNyv7mgw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pg/2002/1784/0/17840477", "title": "Subdivision Surface Simplification", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840477/12OmNviZldv", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icpp/2009/3802/0/3802a100", "title": "Fast Isosurface Extraction for Medical Volume Dataset on Cell BE", "doi": null, "abstractUrl": "/proceedings-article/icpp/2009/3802a100/12OmNwLfMAE", "parentPublication": { "id": "proceedings/icpp/2009/3802/0", "title": "2009 International Conference on Parallel Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1998/9176/0/91760159", "title": "Isosurface Extraction in Time-Varying Fields Using a Temporal Hierarchical Index Tree", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760159/12OmNxYL5dN", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270160", "title": "Direct Reconstruction of Displaced Subdivision Surface from Unorganized Points", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270160/12OmNxvwp1G", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1998/9176/0/91760167", "title": "Interactive Out-Of-Core Isosurface Extraction", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1998/91760167/12OmNyQ7FRQ", "parentPublication": { "id": "proceedings/ieee-vis/1998/9176/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fskd/2009/3735/5/3735e092", "title": "A Novel Adaptive Algorithm of Catmull-Clark Subdivision Surfaces", "doi": null, "abstractUrl": "/proceedings-article/fskd/2009/3735e092/12OmNyaoDEu", "parentPublication": { "id": "proceedings/fskd/2009/3735/5", "title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icctd/2009/3892/2/3892b345", "title": "Butterfly Subdivision Scheme Used for the Unorganized Points Reconstruction in Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/icctd/2009/3892b345/12OmNz2TCJB", "parentPublication": { "id": "proceedings/icctd/2009/3892/2", "title": "Computer Technology and Development, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a472", "title": "A Modification Based on Butterfly Subdivision Scheme", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a472/12OmNz2kqfJ", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061227", "title": "Verifiable Visualization for Isosurface Extraction", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061227/13rRUwInvsH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1997/02/v0158", "title": "Speeding Up Isosurface Extraction Using Interval Trees", "doi": null, "abstractUrl": "/journal/tg/1997/02/v0158/13rRUxcKzVc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCf1Dp1", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNzuZUso", "doi": "10.1109/VISUAL.2002.1183808", "title": "Approximating Normals for Marching Cubes applied to Locally Supported Isosurfaces", "normalizedTitle": "Approximating Normals for Marching Cubes applied to Locally Supported Isosurfaces", "abstract": "We present some new methods for computing estimates of normal vectors at the vertices of a triangular mesh surface approximation to an isosurface which has been computed by the marching cube algorithm. These estimates are required for the smooth rendering of triangular mesh surfaces. The conventional method of computing estimates based upon divided difference approximations of the gradient can lead to poor estimates in some applications. This is particularly true for isosurfaces obtained from a field function, which is defined only for values near to the isosurface. We describe some efficient methods for computing the topology of the triangular mesh surface, which is used for obtaining local estimates of the normals. In addition, a new, one pass, approach for these types of applications is described and compared to existing methods.", "abstracts": [ { "abstractType": "Regular", "content": "We present some new methods for computing estimates of normal vectors at the vertices of a triangular mesh surface approximation to an isosurface which has been computed by the marching cube algorithm. These estimates are required for the smooth rendering of triangular mesh surfaces. The conventional method of computing estimates based upon divided difference approximations of the gradient can lead to poor estimates in some applications. This is particularly true for isosurfaces obtained from a field function, which is defined only for values near to the isosurface. 
We describe some efficient methods for computing the topology of the triangular mesh surface, which is used for obtaining local estimates of the normals. In addition, a new, one pass, approach for these types of applications is described and compared to existing methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present some new methods for computing estimates of normal vectors at the vertices of a triangular mesh surface approximation to an isosurface which has been computed by the marching cube algorithm. These estimates are required for the smooth rendering of triangular mesh surfaces. The conventional method of computing estimates based upon divided difference approximations of the gradient can lead to poor estimates in some applications. This is particularly true for isosurfaces obtained from a field function, which is defined only for values near to the isosurface. We describe some efficient methods for computing the topology of the triangular mesh surface, which is used for obtaining local estimates of the normals. In addition, a new, one pass, approach for these types of applications is described and compared to existing methods.", "fno": "7498nielson", "keywords": [ "Isosurface", "Normal Vectors", "Marching Cubes", "Triangular Mesh", "Topology", "Gouraud Shading", "Approximation" ], "authors": [ { "affiliation": "Arizona State University", "fullName": "Gregory M. 
Nielson", "givenName": "Gregory M.", "surname": "Nielson", "__typename": "ArticleAuthorType" }, { "affiliation": "Arizona State University", "fullName": "Adam Huang", "givenName": "Adam", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Arizona State University", "fullName": "Steve Sylvester", "givenName": "Steve", "surname": "Sylvester", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-10-01T00:00:00", "pubType": "proceedings", "pages": "null", "year": "2002", "issn": "1070-2385", "isbn": "0-7803-7498-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7498taubin", "articleId": "12OmNCtMM1P", "__typename": "AdjacentArticleType" }, "next": { "fno": "7498balmelli", "articleId": "12OmNqFJhHx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2004/8788/0/87880489", "title": "Dual Marching Cubes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880489/12OmNAWpynS", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mvhi/2010/4009/0/4009a608", "title": "Three-Dimensional Reconstruction of Medical Image Based on Improved Marching Cubes Algorithm", "doi": null, "abstractUrl": "/proceedings-article/mvhi/2010/4009a608/12OmNBtUdOc", "parentPublication": { "id": "proceedings/mvhi/2010/4009/0", "title": "Machine Vision and Human-machine Interface, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1997/8262/0/82620221", "title": "Interval volume tetrahedrization", "doi": null, "abstractUrl": 
"/proceedings-article/ieee-vis/1997/82620221/12OmNC4eSCM", "parentPublication": { "id": "proceedings/ieee-vis/1997/8262/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etcs/2010/3987/1/3987a194", "title": "Improvement of Marching Cubes Algorithm Based on Sign Determination", "doi": null, "abstractUrl": "/proceedings-article/etcs/2010/3987a194/12OmNxWcH0N", "parentPublication": { "id": "proceedings/etcs/2010/3987/1", "title": "Education Technology and Computer Science, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300009", "title": "MC*: Star Functions for Marching Cubes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300009/12OmNyprnzr", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2009/02/mcs2009020082", "title": "Marching Cubes without Skinny Triangles", "doi": null, "abstractUrl": "/magazine/cs/2009/02/mcs2009020082/13rRUILc8aT", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/01/v0016", "title": "Improving the Robustness and Accuracy of the Marching Cubes Algorithm for Isosurfacing", "doi": null, "abstractUrl": "/journal/tg/2003/01/v0016/13rRUNvgyWa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1997/03/v0215", "title": "On Approximating Contours of the Piecewise Trilinear Interpolant Using Triangular Rational-Quadratic Bézier Patches", "doi": null, "abstractUrl": 
"/journal/tg/1997/03/v0215/13rRUNvyaeP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061651", "title": "Edge Groups: An Approach to Understanding the Mesh Quality of Marching Methods", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061651/13rRUxly8XA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/03/v0283", "title": "On Marching Cubes", "doi": null, "abstractUrl": "/journal/tg/2003/03/v0283/13rRUyYjK58", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuisc", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "acronym": "sibgrapi", "groupId": "1000131", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBCqbA2", "doi": "10.1109/SIBGRAPI.2015.44", "title": "Real-Time Local Unfolding for Agents Navigation on Arbitrary Surfaces", "normalizedTitle": "Real-Time Local Unfolding for Agents Navigation on Arbitrary Surfaces", "abstract": "Agents path planning is an essential part of games and crowd simulations. In those contexts they are usually restricted to planar surfaces due to the huge computational cost of mapping arbitrary surfaces to a plane without distortions. Mapping is required to benefit from the lower computational cost of distance calculations on a plane (Euclidean distance) when compared to distances on arbitrary surfaces (Geodesic distance). Although solutions have been presented, none have properly handled non-planar surfaces around the agent. In this paper we present mesh parametrization techniques to unfold the region around the agent allowing to extend to arbitrary surfaces the use of existing path planning algorithms initially designed only for planar surfaces. To mitigate the high computational cost of unfolding the entire surface dynamically, we propose pre-processing stages and massive parallelization, resulting in performances similar to that of using a planar surface. We also present a GPU implementation schema that permits a solution to be computed in real-time allowing agents to navigate on deformable surfaces that require dynamic unfolding of the surface. We present results with over 100k agents to prove the approach practicality.", "abstracts": [ { "abstractType": "Regular", "content": "Agents path planning is an essential part of games and crowd simulations. 
In those contexts they are usually restricted to planar surfaces due to the huge computational cost of mapping arbitrary surfaces to a plane without distortions. Mapping is required to benefit from the lower computational cost of distance calculations on a plane (Euclidean distance) when compared to distances on arbitrary surfaces (Geodesic distance). Although solutions have been presented, none have properly handled non-planar surfaces around the agent. In this paper we present mesh parametrization techniques to unfold the region around the agent allowing to extend to arbitrary surfaces the use of existing path planning algorithms initially designed only for planar surfaces. To mitigate the high computational cost of unfolding the entire surface dynamically, we propose pre-processing stages and massive parallelization, resulting in performances similar to that of using a planar surface. We also present a GPU implementation schema that permits a solution to be computed in real-time allowing agents to navigate on deformable surfaces that require dynamic unfolding of the surface. We present results with over 100k agents to prove the approach practicality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Agents path planning is an essential part of games and crowd simulations. In those contexts they are usually restricted to planar surfaces due to the huge computational cost of mapping arbitrary surfaces to a plane without distortions. Mapping is required to benefit from the lower computational cost of distance calculations on a plane (Euclidean distance) when compared to distances on arbitrary surfaces (Geodesic distance). Although solutions have been presented, none have properly handled non-planar surfaces around the agent. In this paper we present mesh parametrization techniques to unfold the region around the agent allowing to extend to arbitrary surfaces the use of existing path planning algorithms initially designed only for planar surfaces. 
To mitigate the high computational cost of unfolding the entire surface dynamically, we propose pre-processing stages and massive parallelization, resulting in performances similar to that of using a planar surface. We also present a GPU implementation schema that permits a solution to be computed in real-time allowing agents to navigate on deformable surfaces that require dynamic unfolding of the surface. We present results with over 100k agents to prove the approach practicality.", "fno": "7962a009", "keywords": [ "Path Planning", "Collision Avoidance", "Navigation", "Computational Efficiency", "Graphics Processing Units", "Distortion", "Real Time Systems", "Computer Graphics", "Path Planning", "Agents" ], "authors": [ { "affiliation": null, "fullName": "Iago U. Berndt", "givenName": "Iago U.", "surname": "Berndt", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Rafael P. Torchelsen", "givenName": "Rafael P.", "surname": "Torchelsen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anderson Maciel", "givenName": "Anderson", "surname": "Maciel", "__typename": "ArticleAuthorType" } ], "idPrefix": "sibgrapi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-08-01T00:00:00", "pubType": "proceedings", "pages": "9-16", "year": "2015", "issn": "1530-1834", "isbn": "978-1-4673-7962-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7962a001", "articleId": "12OmNyp9MiX", "__typename": "AdjacentArticleType" }, "next": { "fno": "7962a017", "articleId": "12OmNxaNGjy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2007/0905/0/04161010", "title": "Real-time Path Planning for Virtual Agents in Dynamic Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161010/12OmNASraPs", 
"parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2011/4548/0/4548a033", "title": "Semi-automatic Navigation on 3D Triangle Meshes Using BVP Based Path-Planning", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2011/4548a033/12OmNBOCWs5", "parentPublication": { "id": "proceedings/sibgrapi/2011/4548/0", "title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761188", "title": "Unfolding warping for object recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761188/12OmNvqmUDC", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034b490", "title": "A Shared Autonomy Approach for Wheelchair Navigation Based on Learned User Preferences", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034b490/12OmNynJMST", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223373", "title": "Interaction with virtual agents — Comparison of the participants' experience between an IVR and a semi-IVR system", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223373/12OmNyrIaAL", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446152", "title": "Simulating 
Movement Interactions Between Avatars & Agents in Virtual Worlds Using Human Motion Constraints", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446152/13bd1tMztYH", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/02/ttg2014020159", "title": "A Whole Surface Approach to Crowd Simulation on Arbitrary Topologies", "doi": null, "abstractUrl": "/journal/tg/2014/02/ttg2014020159/13rRUwInvl2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030666", "title": "Diffusion Equations over Arbitrary Triangulated Surfaces for Filtering and Texture Applications", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030666/13rRUxNEqPK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2017/1235/0/08457966", "title": "ROS Navigation Stack for Smart Indoor Agents", "doi": null, "abstractUrl": "/proceedings-article/aipr/2017/08457966/13xI8AOXccM", "parentPublication": { "id": "proceedings/aipr/2017/1235/0", "title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2020/7303/0/730300a018", "title": "Socially-Aware Multi-agent Velocity Obstacle Based Navigation for Nonholonomic Vehicles", "doi": null, "abstractUrl": "/proceedings-article/compsac/2020/730300a018/1nkDmLRKgEg", "parentPublication": { "id": "proceedings/compsac/2020/7303/0", "title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirp", "title": "Proceedings of 1st IEEE Workshop on Variational and Level Set Methods in Computer Vision", "acronym": "vlsm", "groupId": "1002233", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNCdk2Ax", "doi": "10.1109/VLSM.2001.938899", "title": "Variational Problems and PDE's on Implicit Surfaces", "normalizedTitle": "Variational Problems and PDE's on Implicit Surfaces", "abstract": "A novel framework for solving variational problems and partial differential equations for scalar and vector-valued data defined on surfaces is introduced in this paper. The key idea is to implicitly represent the surface as the level set of a higher dimensional function, and solve the surface equations in a fixed Cartesian coordinate system using this new embedding function. The equations are then both intrinsic to the surface and defined in the embedding space. This approach thereby eliminates the need for performing complicated and not-accurate computations on triangulated surfaces, as it is commonly done in the literature. We describe the framework and present examples in computer graphics and image processing applications, including texture synthesis, flow field visualization, as well as image and vector field intrinsic regularization for data defined on 3D surfaces.", "abstracts": [ { "abstractType": "Regular", "content": "A novel framework for solving variational problems and partial differential equations for scalar and vector-valued data defined on surfaces is introduced in this paper. The key idea is to implicitly represent the surface as the level set of a higher dimensional function, and solve the surface equations in a fixed Cartesian coordinate system using this new embedding function. The equations are then both intrinsic to the surface and defined in the embedding space. 
This approach thereby eliminates the need for performing complicated and not-accurate computations on triangulated surfaces, as it is commonly done in the literature. We describe the framework and present examples in computer graphics and image processing applications, including texture synthesis, flow field visualization, as well as image and vector field intrinsic regularization for data defined on 3D surfaces.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A novel framework for solving variational problems and partial differential equations for scalar and vector-valued data defined on surfaces is introduced in this paper. The key idea is to implicitly represent the surface as the level set of a higher dimensional function, and solve the surface equations in a fixed Cartesian coordinate system using this new embedding function. The equations are then both intrinsic to the surface and defined in the embedding space. This approach thereby eliminates the need for performing complicated and not-accurate computations on triangulated surfaces, as it is commonly done in the literature. 
We describe the framework and present examples in computer graphics and image processing applications, including texture synthesis, flow field visualization, as well as image and vector field intrinsic regularization for data defined on 3D surfaces.", "fno": "12780186", "keywords": [ "Variational Problems", "Partial Differential Equations", "Level Set Method", "Implicit Surfaces", "Image Processing", "Computer Graphics", "Flow Visualization", "Regularization", "Pattern Formation", "Texture Synthesis" ], "authors": [ { "affiliation": "University of Minnesota", "fullName": "Marcelo Bertalmio", "givenName": "Marcelo", "surname": "Bertalmio", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Minnesota", "fullName": "Guillermo Sapiro", "givenName": "Guillermo", "surname": "Sapiro", "__typename": "ArticleAuthorType" }, { "affiliation": "UCLA", "fullName": "Li-Tien Cheng", "givenName": "Li-Tien", "surname": "Cheng", "__typename": "ArticleAuthorType" }, { "affiliation": "UCLA", "fullName": "Stanley Osher", "givenName": "Stanley", "surname": "Osher", "__typename": "ArticleAuthorType" } ], "idPrefix": "vlsm", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-07-01T00:00:00", "pubType": "proceedings", "pages": "186", "year": "2001", "issn": null, "isbn": "0-7695-1278-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "12780179", "articleId": "12OmNzahbVS", "__typename": "AdjacentArticleType" }, "next": { "fno": "12780194", "articleId": "12OmNs0C9Kh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirp", "title": "Proceedings of 1st IEEE Workshop on Variational and Level Set Methods in Computer Vision", "acronym": "vlsm", "groupId": "1002233", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNs0C9Kh", "doi": "10.1109/VLSM.2001.938900", "title": "Fast Surface Reconstruction Using the Level Set Method", "normalizedTitle": "Fast Surface Reconstruction Using the Level Set Method", "abstract": "In this paper we describe new formulations and develop fast algorithms for implicit surface reconstruction based on variational and partial differential equation (PDE) methods. In particular we use the level set method and fast sweeping and tagging methods to reconstruct surfaces from scattered data set. The data set might consist of points, curves and/or surface patches. A weighted minimal surface-like model is constructed and its variational level set formulation is implemented with optimal efficiency. The reconstructed surface is smoother than piecewise linear and has a natural scaling in the regularization that allows varying flexibility according to the local sampling density. As is usual with the level set method we can handle complicated topology and deformations, as well as noisy or highly non-uniform data sets easily. The method is based on a simple rectangular grid, although adaptive and triangular grids are also possible. Some consequences, such as hole filling capability, are demonstrated, as well as the viability and convergence of our new fast tagging algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we describe new formulations and develop fast algorithms for implicit surface reconstruction based on variational and partial differential equation (PDE) methods. In particular we use the level set method and fast sweeping and tagging methods to reconstruct surfaces from scattered data set. 
The data set might consist of points, curves and/or surface patches. A weighted minimal surface-like model is constructed and its variational level set formulation is implemented with optimal efficiency. The reconstructed surface is smoother than piecewise linear and has a natural scaling in the regularization that allows varying flexibility according to the local sampling density. As is usual with the level set method we can handle complicated topology and deformations, as well as noisy or highly non-uniform data sets easily. The method is based on a simple rectangular grid, although adaptive and triangular grids are also possible. Some consequences, such as hole filling capability, are demonstrated, as well as the viability and convergence of our new fast tagging algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we describe new formulations and develop fast algorithms for implicit surface reconstruction based on variational and partial differential equation (PDE) methods. In particular we use the level set method and fast sweeping and tagging methods to reconstruct surfaces from scattered data set. The data set might consist of points, curves and/or surface patches. A weighted minimal surface-like model is constructed and its variational level set formulation is implemented with optimal efficiency. The reconstructed surface is smoother than piecewise linear and has a natural scaling in the regularization that allows varying flexibility according to the local sampling density. As is usual with the level set method we can handle complicated topology and deformations, as well as noisy or highly non-uniform data sets easily. The method is based on a simple rectangular grid, although adaptive and triangular grids are also possible. 
Some consequences, such as hole filling capability, are demonstrated, as well as the viability and convergence of our new fast tagging algorithm.", "fno": "12780194", "keywords": [ "Implicit Surface", "Partial Differential Equations", "Variational Formulation", "Convection", "Minimal Surface" ], "authors": [ { "affiliation": null, "fullName": "Hong-Kai Zhao", "givenName": "Hong-Kai", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Stanley Osher", "givenName": "Stanley", "surname": "Osher", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ronald Fedkiw", "givenName": "Ronald", "surname": "Fedkiw", "__typename": "ArticleAuthorType" } ], "idPrefix": "vlsm", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-07-01T00:00:00", "pubType": "proceedings", "pages": "194", "year": "2001", "issn": null, "isbn": "0-7695-1278-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "12780186", "articleId": "12OmNCdk2Ax", "__typename": "AdjacentArticleType" }, "next": { "fno": "12780203", "articleId": "12OmNBAIARN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwekjuO", "title": "1990 Rensselaer's Second International Conference on Computer Integrated Manufacturing", "acronym": "cim", "groupId": "1000133", "volume": "0", "displayVolume": "0", "year": "1990", "__typename": "ProceedingType" }, "article": { "id": "12OmNwDSduM", "doi": "10.1109/CIM.1990.128082", "title": "Feature-based recognition of triangulated arbitrary surfaces", "normalizedTitle": "Feature-based recognition of triangulated arbitrary surfaces", "abstract": "A procedure is presented for recognizing an object from its range data. A set of object surface data points are segmented into triangular patches using a small number of knot points. These knot points are placed by the local adjustment technique. For the resulting triangulated surface model, high-level edge lines are extracted using unit normal vectors of the triangular patches. This set of high-level primitives is used to construct a graph structure for each object. Experimental results are presented for a set of objects with salient features.", "abstracts": [ { "abstractType": "Regular", "content": "A procedure is presented for recognizing an object from its range data. A set of object surface data points are segmented into triangular patches using a small number of knot points. These knot points are placed by the local adjustment technique. For the resulting triangulated surface model, high-level edge lines are extracted using unit normal vectors of the triangular patches. This set of high-level primitives is used to construct a graph structure for each object. Experimental results are presented for a set of objects with salient features.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A procedure is presented for recognizing an object from its range data. A set of object surface data points are segmented into triangular patches using a small number of knot points. These knot points are placed by the local adjustment technique. 
For the resulting triangulated surface model, high-level edge lines are extracted using unit normal vectors of the triangular patches. This set of high-level primitives is used to construct a graph structure for each object. Experimental results are presented for a set of objects with salient features.", "fno": "00128082", "keywords": [ "Computer Vision", "Graph Theory", "Computer Vision", "Feature Based Recognition", "Triangulated Arbitrary Surfaces", "Local Adjustment Technique", "High Level Edge Lines", "High Level Primitives", "Graph Structure", "Data Mining", "Transmission Line Matrix Methods", "Computer Integrated Manufacturing", "Geometry", "Optical Reflection", "Solid Modeling", "Linear Approximation", "Polynomials", "Object Recognition", "Data Compression" ], "authors": [ { "affiliation": "Dept. of Electr. Eng., Worcester Polytech. Inst., MA, USA", "fullName": "C.Y. Choo", "givenName": "C.Y.", "surname": "Choo", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electr. Eng., Worcester Polytech. Inst., MA, USA", "fullName": "J.A. Bloom", "givenName": "J.A.", "surname": "Bloom", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "W.I. 
Kwak", "givenName": "W.I.", "surname": "Kwak", "__typename": "ArticleAuthorType" } ], "idPrefix": "cim", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1990-01-01T00:00:00", "pubType": "proceedings", "pages": "105,106,107,108,109,110,111,112", "year": "1990", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00128081", "articleId": "12OmNyQYtpN", "__typename": "AdjacentArticleType" }, "next": { "fno": "00128083", "articleId": "12OmNCmpcRu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icip/1994/6952/2/00413671", "title": "Reconstruction of visual surfaces from sparse data using parametric triangular approximants", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413671/12OmNqNXEqd", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2010/4225/2/4225b256", "title": "Weighted Combination Interpolation by Piecewise Cubic Polynomial on Triangulations", "doi": null, "abstractUrl": "/proceedings-article/aici/2010/4225b256/12OmNvSbBC7", "parentPublication": { "id": "proceedings/aici/2010/4225/2", "title": "Artificial Intelligence and Computational Intelligence, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2004/2078/0/20780079", "title": "Some Estimates of the Height of Rational Bernstein-Bezier Triangular Surfaces", "doi": null, "abstractUrl": "/proceedings-article/gmp/2004/20780079/12OmNxFaLD6", "parentPublication": { "id": "proceedings/gmp/2004/2078/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/3dim/1999/0062/0/00620302", "title": "Generating Smooth Surfaces with Bicubic Splines over Triangular Meshes: Toward Automatic Model Building from Unorganized 3D Points", "doi": null, "abstractUrl": "/proceedings-article/3dim/1999/00620302/12OmNxGja3m", "parentPublication": { "id": "proceedings/3dim/1999/0062/0", "title": "3D Digital Imaging and Modeling, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/4/3119d441", "title": "Triangular-Patch Based Texture Synthesis over Arbitrary Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119d441/12OmNxcMSeN", "parentPublication": { "id": "proceedings/cisp/2008/3119/4", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dim/1999/0062/0/00620536", "title": "Curvature Estimation for Segmentation of Triangulated Surfaces", "doi": null, "abstractUrl": "/proceedings-article/3dim/1999/00620536/12OmNxwENKw", "parentPublication": { "id": "proceedings/3dim/1999/0062/0", "title": "3D Digital Imaging and Modeling, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2003/2028/0/20280414", "title": "Implicitizing Bi-Cubic Toric Surfaces by Dixon Α-Resultant Quotients", "doi": null, "abstractUrl": "/proceedings-article/pg/2003/20280414/12OmNy7h39Q", "parentPublication": { "id": "proceedings/pg/2003/2028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/04/ttg2011040500", "title": "Approximation of Loop Subdivision Surfaces for Fast Rendering", "doi": null, "abstractUrl": "/journal/tg/2011/04/ttg2011040500/13rRUILtJzt", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/01/v0099", "title": "Polynomial Surfaces Interpolating Arbitrary Triangulations", "doi": null, "abstractUrl": "/journal/tg/2003/01/v0099/13rRUwInvAR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030666", "title": "Diffusion Equations over Arbitrary Triangulated Surfaces for Filtering and Texture Applications", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030666/13rRUxNEqPK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzVXNIo", "title": "Image and Signal Processing, Congress on", "acronym": "cisp", "groupId": "1001793", "volume": "4", "displayVolume": "4", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNxcMSeN", "doi": "10.1109/CISP.2008.330", "title": "Triangular-Patch Based Texture Synthesis over Arbitrary Surfaces", "normalizedTitle": "Triangular-Patch Based Texture Synthesis over Arbitrary Surfaces", "abstract": "We present an improved texture synthesis algorithm over arbitrary surfaces in this paper. The algorithm adopt the searching strategy of restriction degree to compute the texture coordinates of each triangular patch. It reduces the restrictive conditions as searching for the best matching patches and debases the complexity of the algorithm at the same time. Then aiming at the local boundary unmatching phenomenon, we find the best cut path between the overlap regions through extending graph cut technique. In particular, we combine the texture synthesis algorithm with illumination model to gain more natural effect. The results show that our solution fits to a wide range of sample textures and arbitrary surfaces.", "abstracts": [ { "abstractType": "Regular", "content": "We present an improved texture synthesis algorithm over arbitrary surfaces in this paper. The algorithm adopt the searching strategy of restriction degree to compute the texture coordinates of each triangular patch. It reduces the restrictive conditions as searching for the best matching patches and debases the complexity of the algorithm at the same time. Then aiming at the local boundary unmatching phenomenon, we find the best cut path between the overlap regions through extending graph cut technique. In particular, we combine the texture synthesis algorithm with illumination model to gain more natural effect. 
The results show that our solution fits to a wide range of sample textures and arbitrary surfaces.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an improved texture synthesis algorithm over arbitrary surfaces in this paper. The algorithm adopt the searching strategy of restriction degree to compute the texture coordinates of each triangular patch. It reduces the restrictive conditions as searching for the best matching patches and debases the complexity of the algorithm at the same time. Then aiming at the local boundary unmatching phenomenon, we find the best cut path between the overlap regions through extending graph cut technique. In particular, we combine the texture synthesis algorithm with illumination model to gain more natural effect. The results show that our solution fits to a wide range of sample textures and arbitrary surfaces.", "fno": "3119d441", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Wei Li", "givenName": "Wei", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yuan-yuan Han", "givenName": "Yuan-yuan", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jia-xin Chen", "givenName": "Jia-xin", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "cisp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-05-01T00:00:00", "pubType": "proceedings", "pages": "441-445", "year": "2008", "issn": null, "isbn": "978-0-7695-3119-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3119d436", "articleId": "12OmNx19jTh", "__typename": "AdjacentArticleType" }, "next": { "fno": "3119d449", "articleId": "12OmNwI8cco", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccsit/2009/4519/0/05234981", "title": 
"Improved graph cuts for patch-based texture synthesis", "doi": null, "abstractUrl": "/proceedings-article/iccsit/2009/05234981/12OmNBBQZtI", "parentPublication": { "id": "proceedings/iccsit/2009/4519/0", "title": "Computer Science and Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/2007/2877/0/28770049", "title": "Learning Repetitive Patterns for Classifying Non-Rigidly Deforming Texture Surfaces", "doi": null, "abstractUrl": "/proceedings-article/iciap/2007/28770049/12OmNCmGNXC", "parentPublication": { "id": "proceedings/iciap/2007/2877/0", "title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/04145199", "title": "Discriminability of Real and Virtual Surfaces with Triangular Gratings", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145199/12OmNvwkul9", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1988/9999/1/00754002", "title": "Rendering Of Texture On 3D Surfaces", "doi": null, "abstractUrl": "/proceedings-article/acssc/1988/00754002/12OmNxxNbPS", "parentPublication": { "id": "proceedings/acssc/1988/9999/1", "title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1996/3673/0/36730219", "title": "Opacity-modulating Triangular Textures for Irregular Surfaces", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1996/36730219/12OmNyugz1Z", "parentPublication": { "id": 
"proceedings/ieee-vis/1996/3673/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/1/01394272", "title": "Constrained texture synthesis by scalable sub-patch algorithm", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394272/12OmNzvQI1j", "parentPublication": { "id": "proceedings/icme/2004/8603/1", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/05/v0519", "title": "Decorating Surfaces with Bidirectional Texture Functions", "doi": null, "abstractUrl": "/journal/tg/2005/05/v0519/13rRUwbJD4E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030666", "title": "Diffusion Equations over Arbitrary Triangulated Surfaces for Filtering and Texture Applications", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030666/13rRUxNEqPK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/03/v0278", "title": "Synthesis and Rendering of Bidirectional Texture Functions on Arbitrary Surfaces", "doi": null, "abstractUrl": "/journal/tg/2004/03/v0278/13rRUyYSWsE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040805", "title": "Globally Optimal Surface Mapping for Surfaces with Arbitrary Topology", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040805/13rRUygT7su", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBBhN9F", "title": "2008 8th IEEE International Conference on Computer and Information Technology", "acronym": "cit", "groupId": "1001306", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNyS6REh", "doi": "10.1109/CIT.2008.4594642", "title": "Triangulation of Implicit Surfaces Based on Particle System", "normalizedTitle": "Triangulation of Implicit Surfaces Based on Particle System", "abstract": "This paper presents a new method for triangulation of implicit surfaces. Particle system is used for sampling on the given implicit surface to get steady and even particles. Two normal added vertices are obtained by extending the particle to an equal distance along the out normal vector and the inner normal vector. Two different methods for triangulation of implicit surfaces are presented. One is the particles and normal added vertices are combined together and divided into tetrahedrons, then delete the normal added vertices and the edges which relate to these vertices. The other is only dividing the normal added vertices into tetrahedrons, then traverse all of the tetrahedrons to find the triangles on implicit surfaces and achieve the final triangular mesh. Examples are included in the end to demonstrate the efficiency of the new methods.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a new method for triangulation of implicit surfaces. Particle system is used for sampling on the given implicit surface to get steady and even particles. Two normal added vertices are obtained by extending the particle to an equal distance along the out normal vector and the inner normal vector. Two different methods for triangulation of implicit surfaces are presented. One is the particles and normal added vertices are combined together and divided into tetrahedrons, then delete the normal added vertices and the edges which relate to these vertices. 
The other is only dividing the normal added vertices into tetrahedrons, then traverse all of the tetrahedrons to find the triangles on implicit surfaces and achieve the final triangular mesh. Examples are included in the end to demonstrate the efficiency of the new methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a new method for triangulation of implicit surfaces. Particle system is used for sampling on the given implicit surface to get steady and even particles. Two normal added vertices are obtained by extending the particle to an equal distance along the out normal vector and the inner normal vector. Two different methods for triangulation of implicit surfaces are presented. One is the particles and normal added vertices are combined together and divided into tetrahedrons, then delete the normal added vertices and the edges which relate to these vertices. The other is only dividing the normal added vertices into tetrahedrons, then traverse all of the tetrahedrons to find the triangles on implicit surfaces and achieve the final triangular mesh. 
Examples are included in the end to demonstrate the efficiency of the new methods.", "fno": "04594642", "keywords": [ "Computer Graphics", "Mesh Generation", "Implicit Surface Triangulation", "Particle System", "Tetrahedrons", "Triangular Mesh", "Surface Treatment", "Surface Reconstruction", "Distance Measurement", "Image Generation", "Computer Science", "Rough Surfaces", "Computers" ], "authors": [ { "affiliation": "School of Computer, Science and Technology, Shandong University, China", "fullName": "Yuanfeng Zhou", "givenName": null, "surname": "Yuanfeng Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer, Science and Technology, Shandong University, China", "fullName": "Caiming Zhang", "givenName": null, "surname": "Caiming Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer, Science and Technology, The University of HongKong, China", "fullName": "Pengbo Bo", "givenName": null, "surname": "Pengbo Bo", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer, Science and Technology, Shandong University, China", "fullName": "Weitao Li", "givenName": null, "surname": "Weitao Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "cit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-07-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": null, "isbn": "978-1-4244-2357-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04594641", "articleId": "12OmNx5piS9", "__typename": "AdjacentArticleType" }, "next": { "fno": "04594643", "articleId": "12OmNAle6Pz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pg/2002/1784/0/17840475", "title": "Interactive Visualization of Non-Manifold Implicit Surfaces Using Pre-Integrated Volume Rendering", "doi": null, 
"abstractUrl": "/proceedings-article/pg/2002/17840475/12OmNBqv2go", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2016/2303/0/2303a025", "title": "Detail-Preserving 3D Shape Modeling from Raw Volumetric Dataset via Hessian-Constrained Local Implicit Surfaces Optimization", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a025/12OmNCbU38Z", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2002/1674/0/16740138", "title": "Non-Manifold Implicit Surfaces Based on Discontinuous Implicitization and Polygonization", "doi": null, "abstractUrl": "/proceedings-article/gmp/2002/16740138/12OmNwbLVmQ", "parentPublication": { "id": "proceedings/gmp/2002/1674/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2016/3568/0/3568a136", "title": "Method Based on Triangulation for Sensor Deployment on 3D Surfaces", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2016/3568a136/12OmNxG1yK5", "parentPublication": { "id": "proceedings/sibgrapi/2016/3568/0", "title": "2016 29th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-cg/2005/2473/0/24730133", "title": "High Quality Triangulation of Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cad-cg/2005/24730133/12OmNyUWR09", "parentPublication": { "id": "proceedings/cad-cg/2005/2473/0", "title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2006/2754/0/27540355", "title": "Subdivision Interpolating Polygonization of Implicit Surfaces with Normal Meshes", "doi": null, "abstractUrl": "/proceedings-article/icat/2006/27540355/12OmNzBwGxn", "parentPublication": { "id": "proceedings/icat/2006/2754/0", "title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2010/4215/0/4215a113", "title": "Optimizing Triangulation of Implicit Surface Based on Quadric Error Metrics", "doi": null, "abstractUrl": "/proceedings-article/cw/2010/4215a113/12OmNzahc4Q", "parentPublication": { "id": "proceedings/cw/2010/4215/0", "title": "2010 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2018/02/07530931", "title": "Analytical Model for Resistivity and Mean Free Path in On-Chip Interconnects with Rough Surfaces", "doi": null, "abstractUrl": "/journal/ec/2018/02/07530931/13rRUxBJhqI", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/04/v0346", "title": "Robust Creation of Implicit Surfaces from Polygonal Meshes", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0346/13rRUxD9h4Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2001/02/mcg2001020060", "title": "Curvature-Dependent Triangulation of Implicit Surfaces", "doi": null, "abstractUrl": "/magazine/cg/2001/02/mcg2001020060/13rRUxk89gB", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyaXPP0", "title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)", "acronym": "cad-cg", "groupId": "1001488", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNyUWR09", "doi": "10.1109/CAD-CG.2005.46", "title": "High Quality Triangulation of Implicit Surfaces", "normalizedTitle": "High Quality Triangulation of Implicit Surfaces", "abstract": "We present a new high quality tessellation method for implicit surfaces in this paper. The approach can handle arbitrary implicit functions and dynamic implicit surfaces based on skeletal primitives. We first samples the implicit surface uniformly using particle fission and floating, then reconstructs a triangular mesh from the sample points using ball pivoting algorithm (BPA). Finally, we subdivide the reconstructed surface using a 1 to 4 subdivision scheme to obtain the high quality implicit surface tessellation.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new high quality tessellation method for implicit surfaces in this paper. The approach can handle arbitrary implicit functions and dynamic implicit surfaces based on skeletal primitives. We first samples the implicit surface uniformly using particle fission and floating, then reconstructs a triangular mesh from the sample points using ball pivoting algorithm (BPA). Finally, we subdivide the reconstructed surface using a 1 to 4 subdivision scheme to obtain the high quality implicit surface tessellation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new high quality tessellation method for implicit surfaces in this paper. The approach can handle arbitrary implicit functions and dynamic implicit surfaces based on skeletal primitives. 
We first samples the implicit surface uniformly using particle fission and floating, then reconstructs a triangular mesh from the sample points using ball pivoting algorithm (BPA). Finally, we subdivide the reconstructed surface using a 1 to 4 subdivision scheme to obtain the high quality implicit surface tessellation.", "fno": "24730133", "keywords": [], "authors": [ { "affiliation": "Zhejiang University, China", "fullName": "Shengjun Liu", "givenName": "Shengjun", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University, China", "fullName": "Xuehui Yin", "givenName": "Xuehui", "surname": "Yin", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University, China", "fullName": "Xiaogang Jin", "givenName": "Xiaogang", "surname": "Jin", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University, China", "fullName": "Jieqing Feng", "givenName": "Jieqing", "surname": "Feng", "__typename": "ArticleAuthorType" } ], "idPrefix": "cad-cg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-12-01T00:00:00", "pubType": "proceedings", "pages": "133-138", "year": "2005", "issn": null, "isbn": "0-7695-2473-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "24730125", "articleId": "12OmNvqmUCM", "__typename": "AdjacentArticleType" }, "next": { "fno": "24730139", "articleId": "12OmNAsBFKT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/smi/2001/0853/0/08530062", "title": "Implicit Surfaces that Interpolate", "doi": null, "abstractUrl": "/proceedings-article/smi/2001/08530062/12OmNAZfxIC", "parentPublication": { "id": "proceedings/smi/2001/0853/0", "title": "Shape Modeling and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/visual/1993/3940/0/00398875", "title": "Implicit stream surfaces", "doi": null, "abstractUrl": "/proceedings-article/visual/1993/00398875/12OmNAlvI2V", "parentPublication": { "id": "proceedings/visual/1993/3940/0", "title": "Proceedings Visualization '93", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2012/0863/0/06183594", "title": "Implicit representation of molecular surfaces", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2012/06183594/12OmNAu1FnZ", "parentPublication": { "id": "proceedings/pacificvis/2012/0863/0", "title": "Visualization Symposium, IEEE Pacific", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2008/2339/0/04563084", "title": "3D non-rigid registration for MPU implicit surfaces", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2008/04563084/12OmNBO3K1b", "parentPublication": { "id": "proceedings/cvprw/2008/2339/0", "title": "2008 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/2002/7641/0/76410073", "title": "Dynamic Triangulation of Variational Implicit Surfaces Using Incremental Delaunay Tetrahedralization", "doi": null, "abstractUrl": "/proceedings-article/vv/2002/76410073/12OmNrJiCRK", "parentPublication": { "id": "proceedings/vv/2002/7641/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2006/2686/0/26860205", "title": "Robust adaptive meshes for implicit surfaces", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2006/26860205/12OmNxb5hwt", "parentPublication": { "id": "proceedings/sibgrapi/2006/2686/0", "title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iptc/2010/4196/0/4196a220", "title": "Parallel Polygonization of Implicit Surfaces", "doi": null, "abstractUrl": "/proceedings-article/iptc/2010/4196a220/12OmNyQph0Z", "parentPublication": { "id": "proceedings/iptc/2010/4196/0", "title": "Intelligence Information Processing and Trusted Computing, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2008/2357/0/04594642", "title": "Triangulation of Implicit Surfaces Based on Particle System", "doi": null, "abstractUrl": "/proceedings-article/cit/2008/04594642/12OmNyS6REh", "parentPublication": { "id": "proceedings/cit/2008/2357/0", "title": "2008 8th IEEE International Conference on Computer and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/05/mcg2003050070", "title": "Rendering the Intersections of Implicit Surfaces", "doi": null, "abstractUrl": "/magazine/cg/2003/05/mcg2003050070/13rRUwh80JL", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/04/v0346", "title": "Robust Creation of Implicit Surfaces from Polygonal Meshes", "doi": null, "abstractUrl": "/journal/tg/2002/04/v0346/13rRUxD9h4Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1lEiCTgo8", "doi": "10.1109/CVPR52688.2022.01797", "title": "3PSDF: Three-Pole Signed Distance Function for Learning Surfaces with Arbitrary Topologies", "normalizedTitle": "3PSDF: Three-Pole Signed Distance Function for Learning Surfaces with Arbitrary Topologies", "abstract": "Recent advances in learning 3D shapes using neural implicit functions have achieved impressive results by breaking the previous barrier of resolution and diversity for varying topologies. However, most of such approaches are limited to closed surfaces as they require the space to be divided into inside and outside. More recent works based on unsigned distance function have been proposed to handle complex geometry containing both the open and closed surfaces. Nonetheless, as their direct outputs are point clouds, robustly obtaining high-quality meshing results from discrete points remains an open question. We present a novel learnable implicit representation, called three-pole signed distance function (3PSDF), that can represent non-watertight 3D shapes with arbitrary topologies while supporting easy field-to-mesh conversion using the classic Marching Cubes algorithm. The key to our method is the introduction of a new sign, the NULL sign, in addition to the conventional in and out labels. The existence of the null sign could stop the formation of a closed isosurface derived from the bisector of the in/out regions. Further, we propose a dedicated learning framework to effectively learn 3PSDF without worrying about the vanishing gradient due to the null labels. 
Experimental results show that our approach outperforms the previous state-of-the-art methods in a wide range of benchmarks both quantitatively and qualitatively.", "abstracts": [ { "abstractType": "Regular", "content": "Recent advances in learning 3D shapes using neural implicit functions have achieved impressive results by breaking the previous barrier of resolution and diversity for varying topologies. However, most of such approaches are limited to closed surfaces as they require the space to be divided into inside and outside. More recent works based on unsigned distance function have been proposed to handle complex geometry containing both the open and closed surfaces. Nonetheless, as their direct outputs are point clouds, robustly obtaining high-quality meshing results from discrete points remains an open question. We present a novel learnable implicit representation, called three-pole signed distance function (3PSDF), that can represent non-watertight 3D shapes with arbitrary topologies while supporting easy field-to-mesh conversion using the classic Marching Cubes algorithm. The key to our method is the introduction of a new sign, the NULL sign, in addition to the conventional in and out labels. The existence of the null sign could stop the formation of a closed isosurface derived from the bisector of the in/out regions. Further, we propose a dedicated learning framework to effectively learn 3PSDF without worrying about the vanishing gradient due to the null labels. Experimental results show that our approach outperforms the previous state-of-the-art methods in a wide range of benchmarks both quantitatively and qualitatively.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent advances in learning 3D shapes using neural implicit functions have achieved impressive results by breaking the previous barrier of resolution and diversity for varying topologies. 
However, most of such approaches are limited to closed surfaces as they require the space to be divided into inside and outside. More recent works based on unsigned distance function have been proposed to handle complex geometry containing both the open and closed surfaces. Nonetheless, as their direct outputs are point clouds, robustly obtaining high-quality meshing results from discrete points remains an open question. We present a novel learnable implicit representation, called three-pole signed distance function (3PSDF), that can represent non-watertight 3D shapes with arbitrary topologies while supporting easy field-to-mesh conversion using the classic Marching Cubes algorithm. The key to our method is the introduction of a new sign, the NULL sign, in addition to the conventional in and out labels. The existence of the null sign could stop the formation of a closed isosurface derived from the bisector of the in/out regions. Further, we propose a dedicated learning framework to effectively learn 3PSDF without worrying about the vanishing gradient due to the null labels. 
Experimental results show that our approach outperforms the previous state-of-the-art methods in a wide range of benchmarks both quantitatively and qualitatively.", "fno": "694600s8501", "keywords": [ "Computational Geometry", "Data Visualisation", "Geometry", "Image Reconstruction", "Iterative Methods", "Learning Artificial Intelligence", "Mesh Generation", "Solid Modelling", "Field To Mesh Conversion", "Classic Marching Cubes Algorithm", "Null Sign", "Closed Isosurface", "Dedicated Learning Framework", "3 PSDF", "Three Pole Signed Distance Function", "Arbitrary Topologies", "Neural Implicit Functions", "Previous Barrier", "Varying Topologies", "Closed Surfaces", "Unsigned Distance Function", "Open Surfaces", "Point Clouds", "High Quality Meshing Results", "Discrete Points", "Learnable Implicit Representation", "Called Three Pole", "Nonwatertight 3 D Shapes", "Point Cloud Compression", "Geometry", "Computer Vision", "Three Dimensional Displays", "Shape", "Benchmark Testing", "Topology" ], "authors": [ { "affiliation": "Digital Content Technology Center, Tencent Games", "fullName": "Weikai Chen", "givenName": "Weikai", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Digital Content Technology Center, Tencent Games", "fullName": "Cheng Lin", "givenName": "Cheng", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "Digital Content Technology Center, Tencent Games", "fullName": "Weiyang Li", "givenName": "Weiyang", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Digital Content Technology Center, Tencent Games", "fullName": "Bo Yang", "givenName": "Bo", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "18501-18510", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [ { "id": "1H1lEefwK2s", "name": "pcvpr202269460-09879493s1-mm_694600s8501.zip", "size": "16.2 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879493s1-mm_694600s8501.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600s8490", "articleId": "1H1htgcIy9G", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600s8511", "articleId": "1H1kkAtUvAY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sc/2016/8815/0/8815a761", "title": "Measuring and Understanding Throughput of Network Topologies", "doi": null, "abstractUrl": "/proceedings-article/sc/2016/8815a761/12OmNAkWvv4", "parentPublication": { "id": "proceedings/sc/2016/8815/0", "title": "SC16: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsoc/2013/5086/0/5086a073", "title": "Mapping Non-trivial Network Topologies Onto Chips", "doi": null, "abstractUrl": "/proceedings-article/mcsoc/2013/5086a073/12OmNs5rkYL", "parentPublication": { "id": "proceedings/mcsoc/2013/5086/0", "title": "2013 IEEE 7th International Symposium on Embedded Multicore Socs (MCSoC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isca/2012/0475/0/06237016", "title": "A case for random shortcut topologies for HPC interconnects", "doi": null, "abstractUrl": "/proceedings-article/isca/2012/06237016/12OmNx8Ouog", "parentPublication": { "id": "proceedings/isca/2012/0475/0", "title": "Computer Architecture, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2017/2129/0/212901a664", "title": "HiRy: An Advanced Theory on Design of Deadlock-Free Adaptive Routing for Arbitrary 
Topologies", "doi": null, "abstractUrl": "/proceedings-article/icpads/2017/212901a664/12OmNxI0Kvn", "parentPublication": { "id": "proceedings/icpads/2017/2129/0", "title": "2017 IEEE 23rd International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2016/2655/0/2655a188", "title": "Towards Ideal Hop Counts in Interconnection Networks with Arbitrary Size", "doi": null, "abstractUrl": "/proceedings-article/candar/2016/2655a188/12OmNz4BdgJ", "parentPublication": { "id": "proceedings/candar/2016/2655/0", "title": "2016 Fourth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/01/v0099", "title": "Polynomial Surfaces Interpolating Arbitrary Triangulations", "doi": null, "abstractUrl": "/journal/tg/2003/01/v0099/13rRUwInvAR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/02/ttg2014020159", "title": "A Whole Surface Approach to Crowd Simulation on Arbitrary Topologies", "doi": null, "abstractUrl": "/journal/tg/2014/02/ttg2014020159/13rRUwInvl2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/nt/2012/01/05934390", "title": "Obtaining provably legitimate internet topologies", "doi": null, "abstractUrl": "/journal/nt/2012/01/05934390/13rRUxBJhCX", "parentPublication": { "id": "trans/nt", "title": "IEEE/ACM Transactions on Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/2018/9502/0/950200a036", "title": "Link-Sign Prediction in Dynamic Signed Directed Networks", "doi": 
null, "abstractUrl": "/proceedings-article/cic/2018/950200a036/17D45WK5Asn", "parentPublication": { "id": "proceedings/cic/2018/9502/0", "title": "2018 IEEE 4th International Conference on Collaboration and Internet Computing (CIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2021/3176/0/09667017", "title": "Deep Parametric Surfaces for 3D Outfit Reconstruction from Single View Image", "doi": null, "abstractUrl": "/proceedings-article/fg/2021/09667017/1A6BEk9xRaU", "parentPublication": { "id": "proceedings/fg/2021/3176/0", "title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzgNXZA", "title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)", "acronym": "cso", "groupId": "1002829", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNwDACzm", "doi": "10.1109/CSO.2012.160", "title": "Novel Pre-treatment for Inhomogeneous Dielectric Media in Finite Element Analysis", "normalizedTitle": "Novel Pre-treatment for Inhomogeneous Dielectric Media in Finite Element Analysis", "abstract": "A novel effective geometric modeling approach for finite element mesh generation is proposed in this paper based on finite element analysis software (FEAS). The grid matching problem of discontinuous dielectric media is dealt with from the perspective of geometric modeling. Examples show that the presented techniques are simple and accurate, and thus they can be used for modeling.", "abstracts": [ { "abstractType": "Regular", "content": "A novel effective geometric modeling approach for finite element mesh generation is proposed in this paper based on finite element analysis software (FEAS). The grid matching problem of discontinuous dielectric media is dealt with from the perspective of geometric modeling. Examples show that the presented techniques are simple and accurate, and thus they can be used for modeling.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A novel effective geometric modeling approach for finite element mesh generation is proposed in this paper based on finite element analysis software (FEAS). The grid matching problem of discontinuous dielectric media is dealt with from the perspective of geometric modeling. 
Examples show that the presented techniques are simple and accurate, and thus they can be used for modeling.", "fno": "06274821", "keywords": [ "Computational Electromagnetics", "Dielectric Materials", "Inhomogeneous Media", "Mesh Generation", "Solid Modelling", "Pretreatment", "Inhomogeneous Dielectric Media", "Geometric Modeling Approach", "Finite Element Mesh Generation", "Finite Element Analysis Software", "FEAS", "Grid Matching Problem", "Discontinuous Dielectric Media", "Finite Element Methods", "Dielectrics", "Software", "Electromagnetics", "Electromagnetic Scattering", "Nonhomogeneous Media", "Pre Treatment", "Finite Element Analysis", "FEAS" ], "authors": [ { "affiliation": null, "fullName": "Jin Tian", "givenName": "Jin", "surname": "Tian", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Li Gong", "givenName": "Li", "surname": "Gong", "__typename": "ArticleAuthorType" } ], "idPrefix": "cso", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-06-01T00:00:00", "pubType": "proceedings", "pages": "699-701", "year": "2012", "issn": null, "isbn": "978-1-4673-1365-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06274820", "articleId": "12OmNAle6K5", "__typename": "AdjacentArticleType" }, "next": { "fno": "06274822", "articleId": "12OmNxZkhut", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isot/2014/6752/0/07119390", "title": "Interaction of Acoustic Solitons with Inhomogeneous Media Containing a Spherical Shape Defect", "doi": null, "abstractUrl": "/proceedings-article/isot/2014/07119390/12OmNAXPyed", "parentPublication": { "id": "proceedings/isot/2014/6752/0", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/hipcw/2016/5773/0/07837056", "title": "An Object Oriented Parallel Finite Element Scheme for Computations of PDEs: Design and Implementation", "doi": null, "abstractUrl": "/proceedings-article/hipcw/2016/07837056/12OmNAkWvvg", "parentPublication": { "id": "proceedings/hipcw/2016/5773/0", "title": "2016 IEEE 23rd International Conference on High-Performance Computing: Workshops (HiPCW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scamc/1978/9999/0/00679904", "title": "Absorbed-Dose Computations For Inhomogeneous Media In Radiation-Treatment Planning Using Differential Scatter-Air Ratios", "doi": null, "abstractUrl": "/proceedings-article/scamc/1978/00679904/12OmNCmpcO3", "parentPublication": { "id": "proceedings/scamc/1978/9999/0", "title": "1978 The Second Annual Symposium on Computer Application in Medical Care", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/2/3583b810", "title": "Finite Element/Infinite Element Method for Acoustic Scattering Problem", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b810/12OmNwDSdoq", "parentPublication": { "id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a687", "title": "Adaptive Finite Element Analysis in the Application of Electromagnetic Mechanics", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a687/12OmNwHQB8G", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/1994/5090/2/00323237", "title": "Experience with automatic, dynamic load balancing 
and adaptive finite element computation", "doi": null, "abstractUrl": "/proceedings-article/hicss/1994/00323237/12OmNwO5LUx", "parentPublication": { "id": "proceedings/hicss/1994/5090/2", "title": "Proceedings of the Twenty-Seventh Annual Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2010/4270/0/4270a281", "title": "Lagrange-Galerkin Discontinuous Finite Element Methods for the Navier-Stokes Equations", "doi": null, "abstractUrl": "/proceedings-article/iccis/2010/4270a281/12OmNxGALdu", "parentPublication": { "id": "proceedings/iccis/2010/4270/0", "title": "2010 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eidwt/2013/2141/0/5044a145", "title": "Superconvergence of Discontinuous Finite Element Method for Delay-Differential Equations", "doi": null, "abstractUrl": "/proceedings-article/eidwt/2013/5044a145/12OmNyYm2pj", "parentPublication": { "id": "proceedings/eidwt/2013/2141/0", "title": "2013 Fourth International Conference on Emerging Intelligent Data and Web Technologies (EIDWT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fmpc/1988/5892/0/00047479", "title": "A parallel algorithm for finite element computation", "doi": null, "abstractUrl": "/proceedings-article/fmpc/1988/00047479/12OmNzy7uOW", "parentPublication": { "id": "proceedings/fmpc/1988/5892/0", "title": "Proceedings 2nd Symposium on the Frontiers of Massively Parallel Computation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2020/06/08843948", "title": "Optimal Kernel Design for Finite-Element Numerical Integration on GPUs", "doi": null, "abstractUrl": "/magazine/cs/2020/06/08843948/1dqspanAyFW", "parentPublication": { "id": "mags/cs", "title": "Computing in Science 
& Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAH5djW", "title": "2017 IEEE International Parallel and Distributed Processing Symposium: Workshops (IPDPSW)", "acronym": "ipdpsw", "groupId": "1800044", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNwIpNl9", "doi": "10.1109/IPDPSW.2017.93", "title": "Architecting the Discontinuous Deformation Analysis Method Pipeline on the GPU", "normalizedTitle": "Architecting the Discontinuous Deformation Analysis Method Pipeline on the GPU", "abstract": "As an important numerical analysis method of rock mechanics, discontinuous deformation analysis (DDA) has been widely used in rock engineering. DDA has certain advantages such as the large time step and the large deformation, at the cost of relatively low computing efficiency. To address the efficiency bottleneck of DDA, this paper proposes a complete graphics processing unit (GPU)-based version. The entire DDA pipeline, involving contact detection, global matrix building, linear equation solving, and interpenetration checking, is restructured according to the GPU architecture to minimize data transmissions between the host and device. For the equation solver in DDA, a comparison study of the conjugate gradient method with different preconditioners, i.e., block Jacobi, symmetric successive over-relaxation (SSOR) approximate inverse, and ILU, is introduced first, and a novel sparse matrix-vector multiplication (SpMV) method, intended for the sparse block symmetry matrix with distinct features and which outperforms cuSPARSE by 2.8 times, is proposed as well. Schemes to solve memory write conflicts and branch divergences on the GPU are also introduced in contact detection, global matrix building, and interpenetration checking. 
For the stable analysis of a slope, the proposed GPU-based DDA with double precision achieved a speed-up rate that was 48.72 times higher than that of the original CPU-based serial implementation.", "abstracts": [ { "abstractType": "Regular", "content": "As an important numerical analysis method of rock mechanics, discontinuous deformation analysis (DDA) has been widely used in rock engineering. DDA has certain advantages such as the large time step and the large deformation, at the cost of relatively low computing efficiency. To address the efficiency bottleneck of DDA, this paper proposes a complete graphics processing unit (GPU)-based version. The entire DDA pipeline, involving contact detection, global matrix building, linear equation solving, and interpenetration checking, is restructured according to the GPU architecture to minimize data transmissions between the host and device. For the equation solver in DDA, a comparison study of the conjugate gradient method with different preconditioners, i.e., block Jacobi, symmetric successive over-relaxation (SSOR) approximate inverse, and ILU, is introduced first, and a novel sparse matrix-vector multiplication (SpMV) method, intended for the sparse block symmetry matrix with distinct features and which outperforms cuSPARSE by 2.8 times, is proposed as well. Schemes to solve memory write conflicts and branch divergences on the GPU are also introduced in contact detection, global matrix building, and interpenetration checking. For the stable analysis of a slope, the proposed GPU-based DDA with double precision achieved a speed-up rate that was 48.72 times higher than that of the original CPU-based serial implementation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As an important numerical analysis method of rock mechanics, discontinuous deformation analysis (DDA) has been widely used in rock engineering. 
DDA has certain advantages such as the large time step and the large deformation, at the cost of relatively low computing efficiency. To address the efficiency bottleneck of DDA, this paper proposes a complete graphics processing unit (GPU)-based version. The entire DDA pipeline, involving contact detection, global matrix building, linear equation solving, and interpenetration checking, is restructured according to the GPU architecture to minimize data transmissions between the host and device. For the equation solver in DDA, a comparison study of the conjugate gradient method with different preconditioners, i.e., block Jacobi, symmetric successive over-relaxation (SSOR) approximate inverse, and ILU, is introduced first, and a novel sparse matrix-vector multiplication (SpMV) method, intended for the sparse block symmetry matrix with distinct features and which outperforms cuSPARSE by 2.8 times, is proposed as well. Schemes to solve memory write conflicts and branch divergences on the GPU are also introduced in contact detection, global matrix building, and interpenetration checking. 
For the stable analysis of a slope, the proposed GPU-based DDA with double precision achieved a speed-up rate that was 48.72 times higher than that of the original CPU-based serial implementation.", "fno": "07965172", "keywords": [ "Conjugate Gradient Methods", "Discrete Element Method", "Graphics Processing Units", "Matrix Multiplication", "Parallel Processing", "Sparse Matrices", "Discontinuous Deformation Analysis Method Pipeline", "DDA", "GPU Architecture", "Numerical Analysis", "Rock Mechanics", "Rock Engineering", "Computing Efficiency", "Graphics Processing Unit", "Contact Detection", "Global Matrix Building", "Linear Equation Solving", "Interpenetration Checking", "Conjugate Gradient Method", "Sparse Matrix Vector Multiplication", "Sp MV Method", "Sparse Block Symmetry Matrix", "Memory Write Conflicts", "Branch Divergences", "Discrete Element Method", "Graphics Processing Units", "Sparse Matrices", "Pipelines", "Mathematical Model", "Jacobian Matrices", "Buildings", "Finite Element Analysis", "Discontinuous Deformation Analysis", "Graphics Processing Unit GPU", "Sparse Matrix Vector Multiplication Sp MV", "Contact Detection", "Memory Write Conflict", "Branch Divergence" ], "authors": [ { "affiliation": null, "fullName": "Yunfan Xiao", "givenName": "Yunfan", "surname": "Xiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Min Huang", "givenName": "Min", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qinghai Miao", "givenName": "Qinghai", "surname": "Miao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Xiao", "givenName": "Jun", "surname": "Xiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ying Wang", "givenName": "Ying", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "ipdpsw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-05-01T00:00:00", 
"pubType": "proceedings", "pages": "1188-1197", "year": "2017", "issn": null, "isbn": "978-1-5386-3408-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07965171", "articleId": "12OmNynJMYu", "__typename": "AdjacentArticleType" }, "next": { "fno": "07965173", "articleId": "12OmNybfqXQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/chinagrid/2011/0885/0/06051774", "title": "Optimizing Algorithm of Sparse Linear Systems on GPU", "doi": null, "abstractUrl": "/proceedings-article/chinagrid/2011/06051774/12OmNBOllgY", "parentPublication": { "id": "proceedings/chinagrid/2011/0885/0", "title": "2011 Sixth Annual Chinagrid Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asap/2015/1925/0/07245714", "title": "GPU-based multifrontal optimizing method in sparse Cholesky factorization", "doi": null, "abstractUrl": "/proceedings-article/asap/2015/07245714/12OmNBpVQ9K", "parentPublication": { "id": "proceedings/asap/2015/1925/0", "title": "2015 IEEE 26th International Conference on Application-specific Systems, Architectures and Processors (ASAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-icess/2012/4749/0/4749b307", "title": "Fast sparse matrix-vector multiplication on graphics processing unit for finite element analysis", "doi": null, "abstractUrl": "/proceedings-article/hpcc-icess/2012/4749b307/12OmNvFHfCz", "parentPublication": { "id": "proceedings/hpcc-icess/2012/4749/0", "title": "High Performance Computing and Communication &amp; IEEE International Conference on Embedded Software and Systems, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2012/4789/0/4789a673", "title": "Storage and Solving of 
Large Sparse Matrix Linear Equations", "doi": null, "abstractUrl": "/proceedings-article/iccis/2012/4789a673/12OmNx7ov3K", "parentPublication": { "id": "proceedings/iccis/2012/4789/0", "title": "2012 Fourth International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/co-hpc/2014/7564/0/7564a072", "title": "An Implementation of Block Conjugate Gradient Algorithm on CPU-GPU Processors", "doi": null, "abstractUrl": "/proceedings-article/co-hpc/2014/7564a072/12OmNxcdG0k", "parentPublication": { "id": "proceedings/co-hpc/2014/7564/0", "title": "2014 Hardware-Software Co-Design for High Performance Computing (Co-HPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2016/4457/0/4457a785", "title": "A Fine-Grained Parallel Power Flow Method for Large Scale Grid Based on Lightweight GPU Threads", "doi": null, "abstractUrl": "/proceedings-article/icpads/2016/4457a785/12OmNxxdZyg", "parentPublication": { "id": "proceedings/icpads/2016/4457/0", "title": "2016 IEEE 22nd International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2017/3408/0/07965138", "title": "A Compression Method for Storage Formats of a Sparse Matrix in Solving the Large-Scale Linear Systems", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2017/07965138/12OmNyKJiAU", "parentPublication": { "id": "proceedings/ipdpsw/2017/3408/0", "title": "2017 IEEE International Parallel and Distributed Processing Symposium: Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2022/5444/0/544400a354", "title": "Addressing Irregular Patterns of Matrix Computations on GPUs and Their Impact on Applications Powered by Sparse Direct Solvers", "doi": null, 
"abstractUrl": "/proceedings-article/sc/2022/544400a354/1I0bSTk6oLe", "parentPublication": { "id": "proceedings/sc/2022/5444/0/", "title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2022/5444/0/544400a354", "title": "Addressing Irregular Patterns of Matrix Computations on GPUs and Their Impact on Applications Powered by Sparse Direct Solvers", "doi": null, "abstractUrl": "/proceedings-article/sc/2022/544400a354/1L07kgNo1BS", "parentPublication": { "id": "proceedings/sc/2022/5444/0/", "title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candarw/2020/9919/0/991900a178", "title": "Performance Evaluation of Accurate Matrix-Matrix Multiplication on GPU Using Sparse Matrix Multiplications", "doi": null, "abstractUrl": "/proceedings-article/candarw/2020/991900a178/1rqECQOZh6g", "parentPublication": { "id": "proceedings/candarw/2020/9919/0", "title": "2020 Eighth International Symposium on Computing and Networking Workshops (CANDARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvk7JRJ", "title": "Proceedings of the Twenty-Seventh Annual Hawaii International Conference on System Sciences", "acronym": "hicss", "groupId": "1000730", "volume": "2", "displayVolume": "2", "year": "1994", "__typename": "ProceedingType" }, "article": { "id": "12OmNwO5LUx", "doi": "10.1109/HICSS.1994.323237", "title": "Experience with automatic, dynamic load balancing and adaptive finite element computation", "normalizedTitle": "Experience with automatic, dynamic load balancing and adaptive finite element computation", "abstract": "We describe a fine-grained, element-based data migration system that dynamically maintains global load balance on massively parallel MIMD computers, and is effective in the presence of changing work loads. Global load balance is achieved by overlapping neighborhoods of processors. Where each neighborhood performs local load balancing. The method supports a large class of finite element and finite difference based applications and provides an automatic element management system to which applications are easily integrated. We test the system's effectiveness with an adaptive order (p-) refinement discontinuous Galerkin finite element method for the solution of hyperbolic conservation laws on 1024-processor nCUBE2. The results show the significant reduction in execution time synergistically obtained by combining the automatic data migration system and the adaptive finite element method.<>", "abstracts": [ { "abstractType": "Regular", "content": "We describe a fine-grained, element-based data migration system that dynamically maintains global load balance on massively parallel MIMD computers, and is effective in the presence of changing work loads. Global load balance is achieved by overlapping neighborhoods of processors. Where each neighborhood performs local load balancing. 
The method supports a large class of finite element and finite difference based applications and provides an automatic element management system to which applications are easily integrated. We test the system's effectiveness with an adaptive order (p-) refinement discontinuous Galerkin finite element method for the solution of hyperbolic conservation laws on 1024-processor nCUBE2. The results show the significant reduction in execution time synergistically obtained by combining the automatic data migration system and the adaptive finite element method.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe a fine-grained, element-based data migration system that dynamically maintains global load balance on massively parallel MIMD computers, and is effective in the presence of changing work loads. Global load balance is achieved by overlapping neighborhoods of processors. Where each neighborhood performs local load balancing. The method supports a large class of finite element and finite difference based applications and provides an automatic element management system to which applications are easily integrated. We test the system's effectiveness with an adaptive order (p-) refinement discontinuous Galerkin finite element method for the solution of hyperbolic conservation laws on 1024-processor nCUBE2. 
The results show the significant reduction in execution time synergistically obtained by combining the automatic data migration system and the adaptive finite element method.", "fno": "00323237", "keywords": [ "Distributed Memory Systems", "Finite Element Analysis", "Finite Difference Methods", "Resource Allocation", "Computational Complexity", "Parallel Algorithms", "Dynamic Load Balancing", "Adaptive Finite Element Computation", "Element Based Data Migration System", "Massively Parallel MIMD Computers", "Finite Difference", "Automatic Element Management System", "Discontinuous Galerkin Finite Element Method", "Hyperbolic Conservation Laws", "N CUBE 2", "Execution Time", "Distributed Memory", "Distributed Memories", "Finite Element Methods", "Finite Difference Methods", "Resource Management", "Computation Time", "Parallel Algorithms" ], "authors": [ { "affiliation": "MP Comput. Res. Lab., Sandia Nat. Labs., Albuquerque, NM, USA", "fullName": "Wheat", "givenName": null, "surname": "Wheat", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Devine", "givenName": null, "surname": "Devine", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Maccabe", "givenName": null, "surname": "Maccabe", "__typename": "ArticleAuthorType" } ], "idPrefix": "hicss", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "1994-01-01T00:00:00", "pubType": "proceedings", "pages": "463-472", "year": "1994", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00323236", "articleId": "12OmNzdoMIW", "__typename": "AdjacentArticleType" }, "next": { "fno": "00323238", "articleId": "12OmNBTJIB8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpads/1994/6555/0/00590130", "title": "A parallel run-time iterative load balancing 
algorithm for solution-adaptive finite element meshes on hypercubes", "doi": null, "abstractUrl": "/proceedings-article/icpads/1994/00590130/12OmNApu5L0", "parentPublication": { "id": "proceedings/icpads/1994/6555/0", "title": "Proceedings of 1994 International Conference on Parallel and Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcse/2010/4303/2/4303b325", "title": "A Combined Finite-Element and Finite-Volume Method in Reservoir Simulation", "doi": null, "abstractUrl": "/proceedings-article/wcse/2010/4303b325/12OmNCb3ftt", "parentPublication": { "id": "proceedings/wcse/2010/4303/2", "title": "2010 Second World Congress on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2010/7081/4/05514055", "title": "Nonlinear Finite Element Computation on Semi-rigid Connection and Steel Frame", "doi": null, "abstractUrl": "/proceedings-article/icic/2010/05514055/12OmNvjgWyV", "parentPublication": { "id": "proceedings/icic/2010/7081/4", "title": "2010 Third International Conference on Information and Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wmsvm/2010/7077/0/05558282", "title": "Finite Element Analysis of Long-Scale Bag Filter", "doi": null, "abstractUrl": "/proceedings-article/wmsvm/2010/05558282/12OmNxjjEho", "parentPublication": { "id": "proceedings/wmsvm/2010/7077/0", "title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sccc/1997/8052/0/80520246", "title": "Load balancing and communication optimization for parallel adaptive finite element methods", "doi": null, "abstractUrl": "/proceedings-article/sccc/1997/80520246/12OmNym2c3S", "parentPublication": { "id": "proceedings/sccc/1997/8052/0", 
"title": "Proceedings 17th International Conference of the Chilean Computer Science Society", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/1993/4340/0/01263415", "title": "A massively parallel adaptive finite element method with dynamic load balancing", "doi": null, "abstractUrl": "/proceedings-article/sc/1993/01263415/12OmNyv7mng", "parentPublication": { "id": "proceedings/sc/1993/4340/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fmpc/1988/5892/0/00047479", "title": "A parallel algorithm for finite element computation", "doi": null, "abstractUrl": "/proceedings-article/fmpc/1988/00047479/12OmNzy7uOW", "parentPublication": { "id": "proceedings/fmpc/1988/5892/0", "title": "Proceedings 2nd Symposium on the Frontiers of Massively Parallel Computation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/1999/04/l0360", "title": "Tree-Based Parallel Load-Balancing Methods for Solution-Adaptive Finite Element Graphs on Distributed Memory Multicomputers", "doi": null, "abstractUrl": "/journal/td/1999/04/l0360/13rRUILtJyY", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/1993/4340/0/01263415", "title": "A massively parallel adaptive finite element method with dynamic load balancing", "doi": null, "abstractUrl": "/proceedings-article/sc/1993/01263415/1D85xfdu7tu", "parentPublication": { "id": "proceedings/sc/1993/4340/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcmeim/2020/4109/0/410900a603", "title": "Finite Element Analysis and Topology Optimization of Triangular Track&#x2019;s Load-bearing Wheels", "doi": null, "abstractUrl": 
"/proceedings-article/wcmeim/2020/410900a603/1t2mESgw1jy", "parentPublication": { "id": "proceedings/wcmeim/2020/4109/0", "title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiq6", "title": "2018 IEEE International Conference on Cluster Computing (CLUSTER)", "acronym": "cluster", "groupId": "1000095", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WGGoLB", "doi": "10.1109/CLUSTER.2018.00076", "title": "UnSNAP: A Mini-App for Exploring the Performance of Deterministic Discrete Ordinates Transport on Unstructured Meshes", "normalizedTitle": "UnSNAP: A Mini-App for Exploring the Performance of Deterministic Discrete Ordinates Transport on Unstructured Meshes", "abstract": "Solving the deterministic discrete ordinates neutral particle transport equation is a computationally expensive application. On an unstructured mesh, the discontinuous Galerkin finite element method is used for discretisation of the spatial domain. Additionally, an upwind dependency is applied forming wavefront sweeps across the spatial mesh for each iteration of the solve. We present a new mini-app, UnSNAP, which can be used to investigate the performance of arbitrarily high-order finite element unstructured transport on modern architectures. A new schedule appropriate for such architectures is presented. Finally, we show performance results for the mini-app on CPUs with high numbers of cores.", "abstracts": [ { "abstractType": "Regular", "content": "Solving the deterministic discrete ordinates neutral particle transport equation is a computationally expensive application. On an unstructured mesh, the discontinuous Galerkin finite element method is used for discretisation of the spatial domain. Additionally, an upwind dependency is applied forming wavefront sweeps across the spatial mesh for each iteration of the solve. We present a new mini-app, UnSNAP, which can be used to investigate the performance of arbitrarily high-order finite element unstructured transport on modern architectures. A new schedule appropriate for such architectures is presented. 
Finally, we show performance results for the mini-app on CPUs with high numbers of cores.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Solving the deterministic discrete ordinates neutral particle transport equation is a computationally expensive application. On an unstructured mesh, the discontinuous Galerkin finite element method is used for discretisation of the spatial domain. Additionally, an upwind dependency is applied forming wavefront sweeps across the spatial mesh for each iteration of the solve. We present a new mini-app, UnSNAP, which can be used to investigate the performance of arbitrarily high-order finite element unstructured transport on modern architectures. A new schedule appropriate for such architectures is presented. Finally, we show performance results for the mini-app on CPUs with high numbers of cores.", "fno": "831900a598", "keywords": [ "Galerkin Method", "Mesh Generation", "Neutron Transport Theory", "Arbitrarily High Order Finite Element Unstructured Transport", "CPU", "Spatial Mesh", "Wavefront Sweeps", "Upwind Dependency", "Spatial Domain", "Discontinuous Galerkin Finite Element Method", "Computationally Expensive Application", "Deterministic Discrete Ordinates Neutral Particle Transport Equation", "Unstructured Mesh", "Deterministic Discrete Ordinates Transport", "Mini App", "Un SNAP", "Mathematical Model", "Finite Element Analysis", "Method Of Moments", "Scattering", "Diamond", "Computer Architecture", "Schedules", "Deterministic Discrete Ordinates Transport Finite Element Discontinuous Galerkin Sweep Unstructured Mesh" ], "authors": [ { "affiliation": null, "fullName": "Tom Deakin", "givenName": "Tom", "surname": "Deakin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Simon McIntosh-Smith", "givenName": "Simon", "surname": "McIntosh-Smith", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Justin Lovegrove", "givenName": "Justin", "surname": "Lovegrove", 
"__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Richard Smedley-Stevenson", "givenName": "Richard", "surname": "Smedley-Stevenson", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Andrew Hagues", "givenName": "Andrew", "surname": "Hagues", "__typename": "ArticleAuthorType" } ], "idPrefix": "cluster", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-09-01T00:00:00", "pubType": "proceedings", "pages": "598-606", "year": "2018", "issn": null, "isbn": "978-1-5386-8319-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "831900a590", "articleId": "17D45WIXbPB", "__typename": "AdjacentArticleType" }, "next": { "fno": "831900a607", "articleId": "17D45VsBTZL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/big-data/2016/9005/0/07840641", "title": "Accelerating range queries for large-scale unstructured meshes", "doi": null, "abstractUrl": "/proceedings-article/big-data/2016/07840641/12OmNAWpyoq", "parentPublication": { "id": "proceedings/big-data/2016/9005/0", "title": "2016 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598a721", "title": "Performance Evaluation of Unstructured Mesh Physics on Advanced Architectures", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598a721/12OmNx8OuyL", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598a729", "title": "Expressing Parallelism on Many-Core for Deterministic Discrete Ordinates Transport", "doi": null, 
"abstractUrl": "/proceedings-article/cluster/2015/6598a729/12OmNxE2mQy", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pdp/2018/4975/0/497501a068", "title": "Developing and Using a Geometric Multigrid, Unstructured Grid Mini-Application to Assess Many-Core Architectures", "doi": null, "abstractUrl": "/proceedings-article/pdp/2018/497501a068/12OmNykkB6P", "parentPublication": { "id": "proceedings/pdp/2018/4975/0", "title": "2018 26th Euromicro International Conference on Parallel, Distributed and Network-based Processing (PDP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598a785", "title": "CMT-bone: A Mini-App for Compressible Multiphase Turbulence Simulation Software", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598a785/12OmNzVXNYU", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532825", "title": "Marching diamonds for unstructured meshes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532825/12OmNzYeAMV", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2017/2326/0/2326a498", "title": "Exploring On-Node Parallelism with Neutral, a Monte Carlo Neutral Particle Transport Mini-App", "doi": null, "abstractUrl": "/proceedings-article/cluster/2017/2326a498/12OmNzYeAYu", "parentPublication": { "id": "proceedings/cluster/2017/2326/0", "title": "2017 IEEE International Conference on 
Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/03/ttg2011030305", "title": "A Comparison of Gradient Estimation Methods for Volume Rendering on Unstructured Meshes", "doi": null, "abstractUrl": "/journal/tg/2011/03/ttg2011030305/13rRUx0xPi5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/03/ttp2011030471", "title": "Efficient 3D Geometric and Zernike Moments Computation from Unstructured Surface Meshes", "doi": null, "abstractUrl": "/journal/tp/2011/03/ttp2011030471/13rRUxASuHp", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2018/8319/0/831900a615", "title": "BookLeaf: An Unstructured Hydrodynamics Mini-Application", "doi": null, "abstractUrl": "/proceedings-article/cluster/2018/831900a615/17D45VtKiza", "parentPublication": { "id": "proceedings/cluster/2018/8319/0", "title": "2018 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1bXcRsIpmNi", "title": "2018 5th International Conference on Mathematics and Computers in Sciences and Industry (MCSI)", "acronym": "mcsi", "groupId": "1811864", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "1bXcTiHuudy", "doi": "10.1109/MCSI.2018.00015", "title": "Smoothness of Spaces in Finite Element Methods", "normalizedTitle": "Smoothness of Spaces in Finite Element Methods", "abstract": "The smoothness of functions is absolutely essential in the case of space of functions in finite element method (FEM): incompatible FEM slowly converges and has evaluations in nonstandard metrics. Interest in smooth approximate spaces is supported by the desire to have a coincidence of smoothness of exact solution and approximate one. The construction of smooth approximating spaces is the main problem of the finite element method. A lot of papers have been devoted to this problem. The aim of the paper is the obtaining of the necessary and sufficient conditions for the smoothness of coordinate functions provided that the last ones are received by approximate relations which are a generalization of Strang-Michlin's conditions. The relations mentioned above discussed on cell decomposition of differentiable manifold. The smoothness of coordinate functions inside of cells coincides with the smoothness of generating vector function of the right side of approximate relations so that the main question is the smoothness of transition through the boundary of adjacent cells. The smoothness in this case is the equality of values of functionals with supports in the adjacent cells. The obtained results give opportunity to verify the smoothness on the boundary of support of basic functions and after that to assert that basic functions are smooth on the whole. 
In conclusion it is possible to say that this paper discusses the smoothness as the general case of equality of linear functionals with supports in adjacent cells of differentiable manifold. The result may be applied to different sorts of smoothness, for example, to mean smoothness and to weight smoothness.", "abstracts": [ { "abstractType": "Regular", "content": "The smoothness of functions is absolutely essential in the case of space of functions in finite element method (FEM): incompatible FEM slowly converges and has evaluations in nonstandard metrics. Interest in smooth approximate spaces is supported by the desire to have a coincidence of smoothness of exact solution and approximate one. The construction of smooth approximating spaces is the main problem of the finite element method. A lot of papers have been devoted to this problem. The aim of the paper is the obtaining of the necessary and sufficient conditions for the smoothness of coordinate functions provided that the last ones are received by approximate relations which are a generalization of Strang-Michlin's conditions. The relations mentioned above discussed on cell decomposition of differentiable manifold. The smoothness of coordinate functions inside of cells coincides with the smoothness of generating vector function of the right side of approximate relations so that the main question is the smoothness of transition through the boundary of adjacent cells. The smoothness in this case is the equality of values of functionals with supports in the adjacent cells. The obtained results give opportunity to verify the smoothness on the boundary of support of basic functions and after that to assert that basic functions are smooth on the whole. In conclusion it is possible to say that this paper discusses the smoothness as the general case of equality of linear functionals with supports in adjacent cells of differentiable manifold. 
The result may be applied to different sorts of smoothness, for example, to mean smoothness and to weight smoothness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The smoothness of functions is absolutely essential in the case of space of functions in finite element method (FEM): incompatible FEM slowly converges and has evaluations in nonstandard metrics. Interest in smooth approximate spaces is supported by the desire to have a coincidence of smoothness of exact solution and approximate one. The construction of smooth approximating spaces is the main problem of the finite element method. A lot of papers have been devoted to this problem. The aim of the paper is the obtaining of the necessary and sufficient conditions for the smoothness of coordinate functions provided that the last ones are received by approximate relations which are a generalization of Strang-Michlin's conditions. The relations mentioned above discussed on cell decomposition of differentiable manifold. The smoothness of coordinate functions inside of cells coincides with the smoothness of generating vector function of the right side of approximate relations so that the main question is the smoothness of transition through the boundary of adjacent cells. The smoothness in this case is the equality of values of functionals with supports in the adjacent cells. The obtained results give opportunity to verify the smoothness on the boundary of support of basic functions and after that to assert that basic functions are smooth on the whole. In conclusion it is possible to say that this paper discusses the smoothness as the general case of equality of linear functionals with supports in adjacent cells of differentiable manifold. 
The result may be applied to different sorts of smoothness, for example, to mean smoothness and to weight smoothness.", "fno": "750000a024", "keywords": [ "Approximation Theory", "Finite Element Analysis", "Vectors", "Approximate Relations", "Coordinate Functions", "Finite Element Method", "Smooth Approximate Spaces", "Strang Michlin Conditions", "Cell Decomposition", "Vector Function", "Linear Functionals", "Manifolds", "Finite Element Analysis", "Splines Mathematics", "Convergence", "Standards", "Topology", "Extraterrestrial Measurements", "Finite Element Method", "General Smoothness", "Approximate Conditions", "Minimal Splines", "Approximation On Manifold" ], "authors": [ { "affiliation": "Department of Parallel Algorithms St. Petersburg State University", "fullName": "Yuri K. Dem’yanovich", "givenName": "Yuri K.", "surname": "Dem’yanovich", "__typename": "ArticleAuthorType" } ], "idPrefix": "mcsi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-08-01T00:00:00", "pubType": "proceedings", "pages": "24-28", "year": "2018", "issn": null, "isbn": "978-1-5386-7500-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "750000a020", "articleId": "1bXcRyWrPH2", "__typename": "AdjacentArticleType" }, "next": { "fno": "750000a029", "articleId": "1bXcRMdkXM4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/synasc/2014/8447/0/07034730", "title": "Enhancing Dental Radiographic Images in Spline-Type Spaces", "doi": null, "abstractUrl": "/proceedings-article/synasc/2014/07034730/12OmNAfPIOV", "parentPublication": { "id": "proceedings/synasc/2014/8447/0", "title": "2014 16th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cis/2014/7434/0/7434a352", "title": "Variational Image Decomposition in Shearlet Smoothness Spaces", "doi": null, "abstractUrl": "/proceedings-article/cis/2014/7434a352/12OmNrMHOiZ", "parentPublication": { "id": "proceedings/cis/2014/7434/0", "title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icita/2005/2316/1/231610617", "title": "Image Deblurring via Smoothness-Switching on H?lder Spaces", "doi": null, "abstractUrl": "/proceedings-article/icita/2005/231610617/12OmNscxj5T", "parentPublication": { "id": "proceedings/icita/2005/2316/1", "title": "Proceedings. Third International Conference on Information Technology and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pbmcv/1995/7021/0/00514672", "title": "Nonlinear finite element methods for nonrigid motion analysis", "doi": null, "abstractUrl": "/proceedings-article/pbmcv/1995/00514672/12OmNxuo0lg", "parentPublication": { "id": "proceedings/pbmcv/1995/7021/0", "title": "Proceedings of the Workshop on Physics-Based Modeling in Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030680", "title": "Investigation of Smoothness-Increasing Accuracy-Conserving Filters for Improving Streamline Integration through Discontinuous Fields", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030680/13rRUwvT9gm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192734", "title": "Adaptive Multilinear Tensor Product Wavelets", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192734/13rRUxcsYLR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions 
on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/07/07533440", "title": "Randomly Perturbed B-Splines for Nonrigid Image Registration", "doi": null, "abstractUrl": "/journal/tp/2017/07/07533440/13rRUyYSWmh", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a869", "title": "SplineCNN: Fast Geometric Deep Learning with Continuous B-Spline Kernels", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a869/17D45WZZ7Dt", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cscc/2020/6503/0/650300a121", "title": "Wavelet Decomposition for Generalized Haar Spaces", "doi": null, "abstractUrl": "/proceedings-article/cscc/2020/650300a121/1t2mRH9jNGU", "parentPublication": { "id": "proceedings/cscc/2020/6503/0", "title": "2020 24th International Conference on Circuits, Systems, Communications and Computers (CSCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cscc/2020/6503/0/650300a135", "title": "On Construction of Singular Splines", "doi": null, "abstractUrl": "/proceedings-article/cscc/2020/650300a135/1t2mWTao4uI", "parentPublication": { "id": "proceedings/cscc/2020/6503/0", "title": "2020 24th International Conference on Circuits, Systems, Communications and Computers (CSCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pXm1l1PyXm", "title": "2020 IEEE/ACM Workshop on Memory Centric High Performance Computing (MCHPC)", "acronym": "mchpc", "groupId": "1834807", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pXm1QgQgzC", "doi": "10.1109/MCHPC51950.2020.00010", "title": "Hostile Cache Implications for Small, Dense Linear Solves", "normalizedTitle": "Hostile Cache Implications for Small, Dense Linear Solves", "abstract": "The full assembly of the stiffness matrix in finite element codes can be prohibitive in terms of memory footprint resulting from storing that enormous matrix. An optimisation and work around, particularly effective for discontinuous Galerkin based approaches, is to construct and solve the small dense linear systems locally within each element and avoid the global assembly entirely. The different independent linear systems can be solved concurrently in a batched manner, however we have found that the memory subsystem can show destructive behaviour in this paradigm, severely affecting the performance. In this paper we demonstrate the range of performance that can be obtained by allocating the local systems differently, along with evidence to attribute the reasons behind these differences.", "abstracts": [ { "abstractType": "Regular", "content": "The full assembly of the stiffness matrix in finite element codes can be prohibitive in terms of memory footprint resulting from storing that enormous matrix. An optimisation and work around, particularly effective for discontinuous Galerkin based approaches, is to construct and solve the small dense linear systems locally within each element and avoid the global assembly entirely. The different independent linear systems can be solved concurrently in a batched manner, however we have found that the memory subsystem can show destructive behaviour in this paradigm, severely affecting the performance. 
In this paper we demonstrate the range of performance that can be obtained by allocating the local systems differently, along with evidence to attribute the reasons behind these differences.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The full assembly of the stiffness matrix in finite element codes can be prohibitive in terms of memory footprint resulting from storing that enormous matrix. An optimisation and work around, particularly effective for discontinuous Galerkin based approaches, is to construct and solve the small dense linear systems locally within each element and avoid the global assembly entirely. The different independent linear systems can be solved concurrently in a batched manner, however we have found that the memory subsystem can show destructive behaviour in this paradigm, severely affecting the performance. In this paper we demonstrate the range of performance that can be obtained by allocating the local systems differently, along with evidence to attribute the reasons behind these differences.", "fno": "106600a034", "keywords": [ "Finite Element Analysis", "Galerkin Method", "Memory Footprint", "Enormous Matrix", "Optimisation", "Discontinuous Galerkin Based Approaches", "Dense Linear Systems", "Global Assembly", "Independent Linear Systems", "Memory Subsystem", "Destructive Behaviour", "Local Systems", "Hostile Cache Implications", "Stiffness Matrix", "Finite Element Codes", "Finite Element Analysis", "Linear Systems", "Instruction Sets", "Resource Management", "Runtime", "Parallel Processing", "Bandwidth", "Finite Element Method", "Batched Linear Algebra", "Cache", "Memory Allocation" ], "authors": [ { "affiliation": "University of Bristol,Department of Computer Science,UK", "fullName": "Tom Deakin", "givenName": "Tom", "surname": "Deakin", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Bristol,Department of Computer Science,UK", "fullName": "James Cownie", "givenName": "James", "surname": 
"Cownie", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Bristol,Department of Computer Science,UK", "fullName": "Simon McIntosh-Smith", "givenName": "Simon", "surname": "McIntosh-Smith", "__typename": "ArticleAuthorType" }, { "affiliation": "Computational Physics Group, Atomic Weapons Establishment,Aldermaston,UK", "fullName": "Justin Lovegrove", "givenName": "Justin", "surname": "Lovegrove", "__typename": "ArticleAuthorType" }, { "affiliation": "Computational Physics Group, Atomic Weapons Establishment,Aldermaston,UK", "fullName": "Richard Smedley-Stevenson", "givenName": "Richard", "surname": "Smedley-Stevenson", "__typename": "ArticleAuthorType" } ], "idPrefix": "mchpc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "34-41", "year": "2020", "issn": null, "isbn": "978-1-6654-2278-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "106600a025", "articleId": "1pXm1r3Fhgk", "__typename": "AdjacentArticleType" }, "next": { "fno": "106600a042", "articleId": "1pXm1N3zRgk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/asap/2013/0494/0/06567597", "title": "GPU acceleration of Data Assembly in Finite Element Methods and its energy implications", "doi": null, "abstractUrl": "/proceedings-article/asap/2013/06567597/12OmNvA1h43", "parentPublication": { "id": "proceedings/asap/2013/0494/0", "title": "2013 IEEE 24th International Conference on Application-Specific Systems, Architectures and Processors", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-icess/2012/4749/0/4749b307", "title": "Fast sparse matrix-vector multiplication on graphics processing unit for finite element analysis", "doi": null, "abstractUrl": 
"/proceedings-article/hpcc-icess/2012/4749b307/12OmNvFHfCz", "parentPublication": { "id": "proceedings/hpcc-icess/2012/4749/0", "title": "High Performance Computing and Communication &amp; IEEE International Conference on Embedded Software and Systems, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2013/5016/0/5016a287", "title": "Application of Element-Free Galerkin Method for Axis-Symmetric Heat Transfer Problems", "doi": null, "abstractUrl": "/proceedings-article/icdma/2013/5016a287/12OmNvkGWb6", "parentPublication": { "id": "proceedings/icdma/2013/5016/0", "title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pdp/2018/4975/0/497501a255", "title": "Data-Layout Reorganization for an Efficient Intra-Node Assembly of a Spectral Finite-Element Method", "doi": null, "abstractUrl": "/proceedings-article/pdp/2018/497501a255/12OmNwHQB2J", "parentPublication": { "id": "proceedings/pdp/2018/4975/0", "title": "2018 26th Euromicro International Conference on Parallel, Distributed and Network-based Processing (PDP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/1994/5090/2/00323237", "title": "Experience with automatic, dynamic load balancing and adaptive finite element computation", "doi": null, "abstractUrl": "/proceedings-article/hicss/1994/00323237/12OmNwO5LUx", "parentPublication": { "id": "proceedings/hicss/1994/5090/2", "title": "Proceedings of the Twenty-Seventh Annual Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2006/2637/0/26370505", "title": "An Efficient Parallel Finite-Element-Based Domain Decomposition Iterative Technique With Polynomial Preconditioning", 
"doi": null, "abstractUrl": "/proceedings-article/icppw/2006/26370505/12OmNzzP5zh", "parentPublication": { "id": "proceedings/icppw/2006/2637/0", "title": "2006 International Conference on Parallel Processing Workshops (ICPPW'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030680", "title": "Investigation of Smoothness-Increasing Accuracy-Conserving Filters for Improving Streamline Integration through Discontinuous Fields", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030680/13rRUwvT9gm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdrm/2022/7508/0/750800a001", "title": "SABO: Dynamic MPI+OpenMP Resource Balancer", "doi": null, "abstractUrl": "/proceedings-article/ipdrm/2022/750800a001/1KhjDhcuyPK", "parentPublication": { "id": "proceedings/ipdrm/2022/7508/0", "title": "2022 IEEE/ACM Fifth Annual Workshop on Emerging Parallel and Distributed Runtime Systems and Middleware (IPDRM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wscad/2018/3772/0/377200a272", "title": "Acceleration of a Computational Simulation Application for Radiofrequency Ablation Procedure Using GPU", "doi": null, "abstractUrl": "/proceedings-article/wscad/2018/377200a272/1bhIcu4KjDO", "parentPublication": { "id": "proceedings/wscad/2018/3772/0", "title": "2018 Symposium on High Performance Computing Systems (WSCAD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipc/2020/2292/0/229200a305", "title": "Batched Small Tensor-Matrix Multiplications on GPUs", "doi": null, "abstractUrl": "/proceedings-article/hipc/2020/229200a305/1taEZeE2ziE", "parentPublication": { "id": "proceedings/hipc/2020/2292/0", "title": "2020 IEEE 27th International Conference 
on High Performance Computing, Data, and Analytics (HiPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzIUg01", "title": "2009 Conference for Visual Media Production", "acronym": "cvmp", "groupId": "1003129", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNAle6xC", "doi": "10.1109/CVMP.2009.27", "title": "Skeleton Driven Laplacian Volumetric Deformation", "normalizedTitle": "Skeleton Driven Laplacian Volumetric Deformation", "abstract": "This paper proposes a novel mesh animation technique which combines the flexible interactive control of skeleton based animation rigs with volumetric mesh deformation to avoid mesh collapse and self-intersection under folding and twisting motion. Our solution combines the industry standard Linear Skin Blending with a mesh based volumetric deformation approach. Linear Skin Blending is used to attach and efficiently animate a small number of points with a skeletal control rig. These points provide constraints for a Laplacian mesh deformation scheme which solves for the mesh which satisfies the constraints and gives minimum volume deformation of a tetrahedralization of the mesh vertices. This approach allows rigging and animation of high-resolution captured surface meshes from multiple view video or 3D scans. Interactive skeleton driven animation is achieved for meshes of several thousand vertices without the known drawbacks of Linear Skin Blending, mesh collapse around joints and the ’candy wrapper effect’.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a novel mesh animation technique which combines the flexible interactive control of skeleton based animation rigs with volumetric mesh deformation to avoid mesh collapse and self-intersection under folding and twisting motion. Our solution combines the industry standard Linear Skin Blending with a mesh based volumetric deformation approach. Linear Skin Blending is used to attach and efficiently animate a small number of points with a skeletal control rig. 
These points provide constraints for a Laplacian mesh deformation scheme which solves for the mesh which satisfies the constraints and gives minimum volume deformation of a tetrahedralization of the mesh vertices. This approach allows rigging and animation of high-resolution captured surface meshes from multiple view video or 3D scans. Interactive skeleton driven animation is achieved for meshes of several thousand vertices without the known drawbacks of Linear Skin Blending, mesh collapse around joints and the ’candy wrapper effect’.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a novel mesh animation technique which combines the flexible interactive control of skeleton based animation rigs with volumetric mesh deformation to avoid mesh collapse and self-intersection under folding and twisting motion. Our solution combines the industry standard Linear Skin Blending with a mesh based volumetric deformation approach. Linear Skin Blending is used to attach and efficiently animate a small number of points with a skeletal control rig. These points provide constraints for a Laplacian mesh deformation scheme which solves for the mesh which satisfies the constraints and gives minimum volume deformation of a tetrahedralization of the mesh vertices. This approach allows rigging and animation of high-resolution captured surface meshes from multiple view video or 3D scans. Interactive skeleton driven animation is achieved for meshes of several thousand vertices without the known drawbacks of Linear Skin Blending, mesh collapse around joints and the ’candy wrapper effect’.", "fno": "3893a061", "keywords": [ "Volumetric Deformation", "Skeleton Driven" ], "authors": [ { "affiliation": null, "fullName": "C. Budd", "givenName": "C.", "surname": "Budd", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "A. 
Hilton", "givenName": "A.", "surname": "Hilton", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvmp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-11-01T00:00:00", "pubType": "proceedings", "pages": "61-68", "year": "2009", "issn": null, "isbn": "978-0-7695-3893-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3893a052", "articleId": "12OmNqI04M6", "__typename": "AdjacentArticleType" }, "next": { "fno": "3893a081", "articleId": "12OmNylboMl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2010/4166/0/4166a041", "title": "Character Skin Deformation: A Survey", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2010/4166a041/12OmNApu5fG", "parentPublication": { "id": "proceedings/cgiv/2010/4166/0", "title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/2000/0683/0/06830002", "title": "Integrated System for Skin Deformation", "doi": null, "abstractUrl": "/proceedings-article/ca/2000/06830002/12OmNAtaRXK", "parentPublication": { "id": "proceedings/ca/2000/0683/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cadgraphics/2011/4497/0/4497a306", "title": "Lattice-Based Skinning and Deformation for Real-Time Skeleton-Driven Animation", "doi": null, "abstractUrl": "/proceedings-article/cadgraphics/2011/4497a306/12OmNBEGYGs", "parentPublication": { "id": "proceedings/cadgraphics/2011/4497/0", "title": "Computer-Aided Design and Computer Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cadgraphics/2011/4497/0/4497a302", 
"title": "Robust Deformation Transfer via Dual Domain", "doi": null, "abstractUrl": "/proceedings-article/cadgraphics/2011/4497a302/12OmNwDj0Xk", "parentPublication": { "id": "proceedings/cadgraphics/2011/4497/0", "title": "Computer-Aided Design and Computer Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2009/3641/0/3641b068", "title": "A Framework for Real-time Local Free-Form Deformation", "doi": null, "abstractUrl": "/proceedings-article/icis/2009/3641b068/12OmNweBUIN", "parentPublication": { "id": "proceedings/icis/2009/3641/0", "title": "Computer and Information Science, ACIS International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pma/2009/3988/0/3988a395", "title": "Efficient Simulating Interactive Deformation of Virtual Plant", "doi": null, "abstractUrl": "/proceedings-article/pma/2009/3988a395/12OmNxE2mNM", "parentPublication": { "id": "proceedings/pma/2009/3988/0", "title": "Plant Growth Modeling and Applications, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2010/4030/2/4030b447", "title": "2D Cartoon Character Deformation by Sketch Skeleton", "doi": null, "abstractUrl": "/proceedings-article/cso/2010/4030b447/12OmNxWLTkO", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2008/3358/0/3358a278", "title": "Shape Aware Deformation Using a Skeleton-Guided Scheme", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2008/3358a278/12OmNzvz6Ou", "parentPublication": { "id": "proceedings/sibgrapi/2008/3358/0", "title": "2008 XXI Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2008/03/ttg2008030693", "title": "Shape Deformation Using a Skeleton to Drive Simplex Transformations", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030693/13rRUxjQyp7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08839414", "title": "Sparse Data Driven Mesh Deformation", "doi": null, "abstractUrl": "/journal/tg/2021/03/08839414/1dqsrINsJsk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzgNXZq", "title": "Networked Computing and Advanced Information Management, International Conference on", "acronym": "ncm", "groupId": "1002957", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNqI04VH", "doi": "10.1109/NCM.2009.36", "title": "The Smoothed 3D Skeleton for Animation", "normalizedTitle": "The Smoothed 3D Skeleton for Animation", "abstract": "Skeleton is at the main interest of 3D character animation. In this paper, we use a recent skeleton pruning method based on the medial axis transform and the geodesic distance function that originates one-voxel thick, graph-like skeleton. Unfortunately, the location of skeleton joints does not usually match the anatomical joint of the model. We introduce a novel method for smoothing the pruned skeleton for realistic character animation by using the filtering process for adjusting the locations of consecutive points along the skeleton. Chord-to-point distance accumulation then be applied, and the smoothed skeleton is split in order to create segments and joints corresponding to its shape. The new skeleton can be regenerated later on. Therefore, the new skeleton produced from the proposed method can capture the essential shape characteristics in a compact form, while preserving the meaningful anatomical information of the 3D character models. The demonstration of the approach with several examples is also provided.", "abstracts": [ { "abstractType": "Regular", "content": "Skeleton is at the main interest of 3D character animation. In this paper, we use a recent skeleton pruning method based on the medial axis transform and the geodesic distance function that originates one-voxel thick, graph-like skeleton. Unfortunately, the location of skeleton joints does not usually match the anatomical joint of the model. 
We introduce a novel method for smoothing the pruned skeleton for realistic character animation by using the filtering process for adjusting the locations of consecutive points along the skeleton. Chord-to-point distance accumulation then be applied, and the smoothed skeleton is split in order to create segments and joints corresponding to its shape. The new skeleton can be regenerated later on. Therefore, the new skeleton produced from the proposed method can capture the essential shape characteristics in a compact form, while preserving the meaningful anatomical information of the 3D character models. The demonstration of the approach with several examples is also provided.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Skeleton is at the main interest of 3D character animation. In this paper, we use a recent skeleton pruning method based on the medial axis transform and the geodesic distance function that originates one-voxel thick, graph-like skeleton. Unfortunately, the location of skeleton joints does not usually match the anatomical joint of the model. We introduce a novel method for smoothing the pruned skeleton for realistic character animation by using the filtering process for adjusting the locations of consecutive points along the skeleton. Chord-to-point distance accumulation then be applied, and the smoothed skeleton is split in order to create segments and joints corresponding to its shape. The new skeleton can be regenerated later on. Therefore, the new skeleton produced from the proposed method can capture the essential shape characteristics in a compact form, while preserving the meaningful anatomical information of the 3D character models. 
The demonstration of the approach with several examples is also provided.", "fno": "3769b348", "keywords": [ "Skeleton", "Skeleton Smoothing", "Skeleton Pruning", "Chord To Point Distance Accumulation" ], "authors": [ { "affiliation": null, "fullName": "Porawat Visutsak", "givenName": "Porawat", "surname": "Visutsak", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Korakot Prachumrak", "givenName": "Korakot", "surname": "Prachumrak", "__typename": "ArticleAuthorType" } ], "idPrefix": "ncm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-08-01T00:00:00", "pubType": "proceedings", "pages": "1348-1353", "year": "2009", "issn": null, "isbn": "978-0-7695-3769-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3769b344", "articleId": "12OmNyz5JSh", "__typename": "AdjacentArticleType" }, "next": { "fno": "3769b354", "articleId": "12OmNyr8YrC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sitis/2017/4283/0/4283a339", "title": "Recurrent Neural Network Based Action Recognition from 3D Skeleton Data", "doi": null, "abstractUrl": "/proceedings-article/sitis/2017/4283a339/12OmNBQC88B", "parentPublication": { "id": "proceedings/sitis/2017/4283/0", "title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2003/2028/0/20280409", "title": "Automatic Animation Skeleton Construction Using Repulsive Force Field", "doi": null, "abstractUrl": "/proceedings-article/pg/2003/20280409/12OmNxWLTGc", "parentPublication": { "id": "proceedings/pg/2003/2028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/vv/1998/9180/0/91800047", "title": "Volume Animation Using the Skeleton Tree", "doi": null, "abstractUrl": "/proceedings-article/vv/1998/91800047/12OmNyLiuwI", "parentPublication": { "id": "proceedings/vv/1998/9180/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457e570", "title": "A New Representation of Skeleton Sequences for 3D Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457e570/12OmNyQph8y", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759655", "title": "Automatic skeleton generation and character skinning", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759655/12OmNyqzM1p", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2014/4677/0/4677a130", "title": "Automatic Generation of Skeleton Animation from 3D Human Mesh Model", "doi": null, "abstractUrl": "/proceedings-article/cw/2014/4677a130/12OmNzayNus", "parentPublication": { "id": "proceedings/cw/2014/4677/0", "title": "2014 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/03/i0449", "title": "Skeleton Pruning by Contour Partitioning with Discrete Curve Evolution", "doi": null, "abstractUrl": "/journal/tp/2007/03/i0449/13rRUxDqS57", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030693", "title": "Shape Deformation Using a Skeleton to Drive Simplex Transformations", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030693/13rRUxjQyp7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2019/5227/0/522700a016", "title": "Skeleton Image Representation for 3D Action Recognition Based on Tree Structure and Reference Joints", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2019/522700a016/1fHlpmJowLe", "parentPublication": { "id": "proceedings/sibgrapi/2019/5227/0", "title": "2019 32nd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428213", "title": "Spatial Reasoning and Context-Aware Attention Network for Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428213/1uim4mnuHaE", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrAdstn", "title": "Computer Graphics and Applications, Pacific Conference on", "acronym": "pg", "groupId": "1000130", "volume": "0", "displayVolume": "0", "year": "2003", "__typename": "ProceedingType" }, "article": { "id": "12OmNxWLTGc", "doi": "10.1109/PCCGA.2003.1238285", "title": "Automatic Animation Skeleton Construction Using Repulsive Force Field", "normalizedTitle": "Automatic Animation Skeleton Construction Using Repulsive Force Field", "abstract": "A method is proposed in this paper to automatically generate the animation skeleton of a model such that the model can be manipulated according to the skeleton. With our method, users can construct the skeleton in a short time, and bring a static model both dynamic and alive. The primary steps of our method are finding skeleton joints, connecting the joints to form an animation skeleton, and binding skin vertices to the skeleton. Initially, a repulsive force field is constructed inside a given model, and a set of points with local minimal force magnitude are found based on the force field. Then, a modified thinning algorithm is applied to generate an initial skeleton, which is further refined to become the final result. When the skeleton construction completes, skin vertices are anchored to the skeleton joints according to the distances between the vertices and joints. In order to build the repulsive force field, hundreds of rays are shot radially from positions inside the model, and it leads to that the force field computation takes most of the execution time. Therefore, an octree structure is used to accelerate this process. 
Currently, the skeleton generated from a typical 3D model with 1000 to 10000 polygons takes less than 2 minutes on a Intel Pentium 4 2.4 GHz PC.", "abstracts": [ { "abstractType": "Regular", "content": "A method is proposed in this paper to automatically generate the animation skeleton of a model such that the model can be manipulated according to the skeleton. With our method, users can construct the skeleton in a short time, and bring a static model both dynamic and alive. The primary steps of our method are finding skeleton joints, connecting the joints to form an animation skeleton, and binding skin vertices to the skeleton. Initially, a repulsive force field is constructed inside a given model, and a set of points with local minimal force magnitude are found based on the force field. Then, a modified thinning algorithm is applied to generate an initial skeleton, which is further refined to become the final result. When the skeleton construction completes, skin vertices are anchored to the skeleton joints according to the distances between the vertices and joints. In order to build the repulsive force field, hundreds of rays are shot radially from positions inside the model, and it leads to that the force field computation takes most of the execution time. Therefore, an octree structure is used to accelerate this process. Currently, the skeleton generated from a typical 3D model with 1000 to 10000 polygons takes less than 2 minutes on a Intel Pentium 4 2.4 GHz PC.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A method is proposed in this paper to automatically generate the animation skeleton of a model such that the model can be manipulated according to the skeleton. With our method, users can construct the skeleton in a short time, and bring a static model both dynamic and alive. The primary steps of our method are finding skeleton joints, connecting the joints to form an animation skeleton, and binding skin vertices to the skeleton. 
Initially, a repulsive force field is constructed inside a given model, and a set of points with local minimal force magnitude are found based on the force field. Then, a modified thinning algorithm is applied to generate an initial skeleton, which is further refined to become the final result. When the skeleton construction completes, skin vertices are anchored to the skeleton joints according to the distances between the vertices and joints. In order to build the repulsive force field, hundreds of rays are shot radially from positions inside the model, and it leads to that the force field computation takes most of the execution time. Therefore, an octree structure is used to accelerate this process. Currently, the skeleton generated from a typical 3D model with 1000 to 10000 polygons takes less than 2 minutes on a Intel Pentium 4 2.4 GHz PC.", "fno": "20280409", "keywords": [], "authors": [ { "affiliation": "National Taiwan University", "fullName": "Pin-Chou Liu", "givenName": "Pin-Chou", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "National Taiwan University", "fullName": "Fu-Che Wu", "givenName": "Fu-Che", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "National Taiwan University", "fullName": "Wan-Chun Ma", "givenName": "Wan-Chun", "surname": "Ma", "__typename": "ArticleAuthorType" }, { "affiliation": "National Taiwan University", "fullName": "Rung-Huei Liang", "givenName": "Rung-Huei", "surname": "Liang", "__typename": "ArticleAuthorType" }, { "affiliation": "National Taiwan University", "fullName": "Ming Ouhyoung", "givenName": "Ming", "surname": "Ouhyoung", "__typename": "ArticleAuthorType" } ], "idPrefix": "pg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2003-10-01T00:00:00", "pubType": "proceedings", "pages": "409", "year": "2003", "issn": null, "isbn": "0-7695-2028-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [], "adjacentArticles": { "previous": { "fno": "20280404", "articleId": "12OmNrHB1S3", "__typename": "AdjacentArticleType" }, "next": { "fno": "20280414", "articleId": "12OmNy7h39Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvmp/2009/3893/0/3893a061", "title": "Skeleton Driven Laplacian Volumetric Deformation", "doi": null, "abstractUrl": "/proceedings-article/cvmp/2009/3893a061/12OmNAle6xC", "parentPublication": { "id": "proceedings/cvmp/2009/3893/0", "title": "2009 Conference for Visual Media Production", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2006/2754/0/27540275", "title": "Interactive Skeleton Extraction Using Geodesic Distance", "doi": null, "abstractUrl": "/proceedings-article/icat/2006/27540275/12OmNCgJecr", "parentPublication": { "id": "proceedings/icat/2006/2754/0", "title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncm/2009/3769/0/3769b348", "title": "The Smoothed 3D Skeleton for Animation", "doi": null, "abstractUrl": "/proceedings-article/ncm/2009/3769b348/12OmNqI04VH", "parentPublication": { "id": "proceedings/ncm/2009/3769/0", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icita/2005/2316/2/231620015", "title": "Virtual Repulsive Force in Competitive Multi-Robot Teleoperation", "doi": null, "abstractUrl": "/proceedings-article/icita/2005/231620015/12OmNyeECBf", "parentPublication": { "id": "proceedings/icita/2005/2316/2", "title": "Information Technology and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/isvri/2011/0054/0/05759655", "title": "Automatic skeleton generation and character skinning", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759655/12OmNyqzM1p", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2014/4677/0/4677a130", "title": "Automatic Generation of Skeleton Animation from 3D Human Mesh Model", "doi": null, "abstractUrl": "/proceedings-article/cw/2014/4677a130/12OmNzayNus", "parentPublication": { "id": "proceedings/cw/2014/4677/0", "title": "2014 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2018/04/mcg2018040054", "title": "SpiroSurface: A Repulsive and Attractive Force Display for Interactive Tabletops Using a Pneumatic System", "doi": null, "abstractUrl": "/magazine/cg/2018/04/mcg2018040054/13rRUIJcWnw", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030693", "title": "Shape Deformation Using a Skeleton to Drive Simplex Transformations", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030693/13rRUxjQyp7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccnea/2022/9109/0/910900a095", "title": "Uav Path Planning Based on Improved Artificial Potential Field Method", "doi": null, "abstractUrl": "/proceedings-article/iccnea/2022/910900a095/1HYv1CPS1uU", "parentPublication": { "id": "proceedings/iccnea/2022/9109/0", "title": "2022 International Conference on Computer Network, Electronic and Automation (ICCNEA)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800o4321", "title": "Context Aware Graph Convolution for Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800o4321/1m3o0gFXGaQ", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3o0gFXGaQ", "doi": "10.1109/CVPR42600.2020.01434", "title": "Context Aware Graph Convolution for Skeleton-Based Action Recognition", "normalizedTitle": "Context Aware Graph Convolution for Skeleton-Based Action Recognition", "abstract": "Graph convolutional models have gained impressive successes on skeleton based human action recognition task. As graph convolution is a local operation, it cannot fully investigate non-local joints that could be vital to recognizing the action. For example, actions like typing and clapping request the cooperation of two hands, which are distant from each other in a human skeleton graph. Multiple graph convolutional layers thus tend to be stacked together to increase receptive field, which brings in computational inefficiency and optimization difficulty. But there is still no guarantee that distant joints (e.g. two hands) can be well integrated. In this paper, we propose a context aware graph convolutional network (CA-GCN). Besides the computation of localized graph convolution, CA-GCN considers a context term for each vertex by integrating information of all other vertices. Long range dependencies among joints are thus naturally integrated in context information, which then eliminates the need of stacking multiple layers to enlarge receptive field and greatly simplifies the network. Moreover, we further propose an advanced CA-GCN, in which asymmetric relevance measurement and higher level representation are utilized to compute context information for more flexibility and better performance. Besides the joint features, our CA-GCN could also be extended to handle graphs with edge (limb) features. 
Extensive experiments on two real-world datasets demonstrate the importance of context information and the effectiveness of the proposed CA-GCN in skeleton based action recognition.", "abstracts": [ { "abstractType": "Regular", "content": "Graph convolutional models have gained impressive successes on skeleton based human action recognition task. As graph convolution is a local operation, it cannot fully investigate non-local joints that could be vital to recognizing the action. For example, actions like typing and clapping request the cooperation of two hands, which are distant from each other in a human skeleton graph. Multiple graph convolutional layers thus tend to be stacked together to increase receptive field, which brings in computational inefficiency and optimization difficulty. But there is still no guarantee that distant joints (e.g. two hands) can be well integrated. In this paper, we propose a context aware graph convolutional network (CA-GCN). Besides the computation of localized graph convolution, CA-GCN considers a context term for each vertex by integrating information of all other vertices. Long range dependencies among joints are thus naturally integrated in context information, which then eliminates the need of stacking multiple layers to enlarge receptive field and greatly simplifies the network. Moreover, we further propose an advanced CA-GCN, in which asymmetric relevance measurement and higher level representation are utilized to compute context information for more flexibility and better performance. Besides the joint features, our CA-GCN could also be extended to handle graphs with edge (limb) features. 
Extensive experiments on two real-world datasets demonstrate the importance of context information and the effectiveness of the proposed CA-GCN in skeleton based action recognition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Graph convolutional models have gained impressive successes on skeleton based human action recognition task. As graph convolution is a local operation, it cannot fully investigate non-local joints that could be vital to recognizing the action. For example, actions like typing and clapping request the cooperation of two hands, which are distant from each other in a human skeleton graph. Multiple graph convolutional layers thus tend to be stacked together to increase receptive field, which brings in computational inefficiency and optimization difficulty. But there is still no guarantee that distant joints (e.g. two hands) can be well integrated. In this paper, we propose a context aware graph convolutional network (CA-GCN). Besides the computation of localized graph convolution, CA-GCN considers a context term for each vertex by integrating information of all other vertices. Long range dependencies among joints are thus naturally integrated in context information, which then eliminates the need of stacking multiple layers to enlarge receptive field and greatly simplifies the network. Moreover, we further propose an advanced CA-GCN, in which asymmetric relevance measurement and higher level representation are utilized to compute context information for more flexibility and better performance. Besides the joint features, our CA-GCN could also be extended to handle graphs with edge (limb) features. 
Extensive experiments on two real-world datasets demonstrate the importance of context information and the effectiveness of the proposed CA-GCN in skeleton based action recognition.", "fno": "716800o4321", "keywords": [ "Convolutional Neural Nets", "Feature Extraction", "Graph Theory", "Image Motion Analysis", "Image Representation", "Object Recognition", "Skeleton Based Action Recognition", "CA GCN", "Context Aware Graph Convolutional Network", "Skeleton Based Human Action Recognition Task", "Convolution", "Context Aware Services", "Computational Modeling", "Context Modeling", "Task Analysis", "Skeleton", "Feature Extraction" ], "authors": [ { "affiliation": "UBTECH Sydney AI Centre, School of Computer Science, Faculty of Engineering, The University of Sydney, Australia", "fullName": "Xikun Zhang", "givenName": "Xikun", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "UBTECH Sydney AI Centre, School of Computer Science, Faculty of Engineering, The University of Sydney, Australia", "fullName": "Chang Xu", "givenName": "Chang", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "UBTECH Sydney AI Centre, School of Computer Science, Faculty of Engineering, The University of Sydney, Australia", "fullName": "Dacheng Tao", "givenName": "Dacheng", "surname": "Tao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "14321-14330", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800o4311", "articleId": "1m3obwPO5va", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800o4331", "articleId": "1m3nzx0OQrm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/cecit/2021/3757/0/375700b150", "title": "Spatial-Temporal Graph Convolutional Networks for Action Recognition with Adjacency Matrix Generation Network", "doi": null, "abstractUrl": "/proceedings-article/cecit/2021/375700b150/1CdEBDZSXPW", "parentPublication": { "id": "proceedings/cecit/2021/3757/0", "title": "2021 2nd International Conference on Electronics, Communications and Information Technology (CECIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859752", "title": "GLTA-GCN: Global-Local Temporal Attention Graph Convolutional Network for Unsupervised Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859752/1G9DAPwJBzW", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859694", "title": "Structural Attention for Channel-Wise Adaptive Graph Convolution in Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859694/1G9EEMQjNLO", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscer/2022/8478/0/847800a208", "title": "Inception Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/iscer/2022/847800a208/1HbbBGVP8mk", "parentPublication": { "id": "proceedings/iscer/2022/8478/0", "title": "2022 International Symposium on Control Engineering and Robotics (ISCER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049725", "title": "Skeleton-based Human Action 
Recognition via Large-kernel Attention Graph Convolutional Network", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049725/1KYorCgCMLe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300g881", "title": "Bayesian Graph Convolution LSTM for Skeleton Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300g881/1hQqtTIauvS", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a180", "title": "Skeleton-Based Action Recognition With Shift Graph Convolutional Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a180/1m3nFwgro2Y", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412113", "title": "Temporal Extension Module for Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412113/1tminzkoTCg", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2021/4989/0/09455987", "title": "Spatiotemporal-Spectral Graph Convolutional Networks For Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icmew/2021/09455987/1uCgvxv1gly", "parentPublication": { "id": "proceedings/icmew/2021/4989/0", "title": "2021 IEEE International Conference on Multimedia & 
Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428355", "title": "Graph Convolutional Hourglass Networks for Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428355/1uimg7WQXVC", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uiluGq0Oo8", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uim4mnuHaE", "doi": "10.1109/ICME51207.2021.9428213", "title": "Spatial Reasoning and Context-Aware Attention Network for Skeleton-Based Action Recognition", "normalizedTitle": "Spatial Reasoning and Context-Aware Attention Network for Skeleton-Based Action Recognition", "abstract": "Skeleton-based action recognition has achieved promising performance recently, but there are still many challenges, e.g., the structural relation between joints and the different attention of frames, due to the complex spatial-temporal evolution of skeletal joints. In this paper, we propose a spatial reasoning and context-aware attention network for skeleton-based action recognition, which consists of a spatial reasoning module and a context-aware attention module. The spatial reasoning module can exploit the structural relation between joints to obtain the spatial features within each skeleton frame, followed by the context-aware attention module learning the different attention of frames. We perform experiments on two datasets and verify the effectiveness of each module in the proposed network. The comparison results demonstrate that our method achieves state-of-the-art performance.", "abstracts": [ { "abstractType": "Regular", "content": "Skeleton-based action recognition has achieved promising performance recently, but there are still many challenges, e.g., the structural relation between joints and the different attention of frames, due to the complex spatial-temporal evolution of skeletal joints. In this paper, we propose a spatial reasoning and context-aware attention network for skeleton-based action recognition, which consists of a spatial reasoning module and a context-aware attention module. 
The spatial reasoning module can exploit the structural relation between joints to obtain the spatial features within each skeleton frame, followed by the context-aware attention module learning the different attention of frames. We perform experiments on two datasets and verify the effectiveness of each module in the proposed network. The comparison results demonstrate that our method achieves state-of-the-art performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Skeleton-based action recognition has achieved promising performance recently, but there are still many challenges, e.g., the structural relation between joints and the different attention of frames, due to the complex spatial-temporal evolution of skeletal joints. In this paper, we propose a spatial reasoning and context-aware attention network for skeleton-based action recognition, which consists of a spatial reasoning module and a context-aware attention module. The spatial reasoning module can exploit the structural relation between joints to obtain the spatial features within each skeleton frame, followed by the context-aware attention module learning the different attention of frames. We perform experiments on two datasets and verify the effectiveness of each module in the proposed network. 
The comparison results demonstrate that our method achieves state-of-the-art performance.", "fno": "09428213", "keywords": [ "Bone", "Feature Extraction", "Image Motion Analysis", "Image Recognition", "Learning Artificial Intelligence", "Context Aware Attention Network", "Skeleton Based Action Recognition", "Structural Relation", "Spatial Temporal Evolution", "Spatial Reasoning Module", "Spatial Features", "Skeleton Frame", "Context Aware Attention Module Learning", "Conferences", "Cognition", "Skeleton", "Human Action Recognition", "Spatial Reasoning", "Context Aware Model", "Attention Mechanism" ], "authors": [ { "affiliation": "Yanshan University,School of Information Science and Engineering,Qinhuangdao,China", "fullName": "Dianlong You", "givenName": "Dianlong", "surname": "You", "__typename": "ArticleAuthorType" }, { "affiliation": "Yanshan University,School of Information Science and Engineering,Qinhuangdao,China", "fullName": "Ling Wang", "givenName": "Ling", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Cardiff University,School of Medicine,Cardiff,UK", "fullName": "Da Han", "givenName": "Da", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": "Yanshan University,School of Information Science and Engineering,Qinhuangdao,China", "fullName": "Shunpan Liang", "givenName": "Shunpan", "surname": "Liang", "__typename": "ArticleAuthorType" }, { "affiliation": "Yanshan University,School of Information Science and Engineering,Qinhuangdao,China", "fullName": "Hongyang Liu", "givenName": "Hongyang", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Yanshan University,School of Information Science and Engineering,Qinhuangdao,China", "fullName": "Fuyong Yuan", "givenName": "Fuyong", "surname": "Yuan", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-07-01T00:00:00", "pubType": 
"proceedings", "pages": "1-6", "year": "2021", "issn": null, "isbn": "978-1-6654-3864-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09428304", "articleId": "1uilBV6r4UU", "__typename": "AdjacentArticleType" }, "next": { "fno": "09428142", "articleId": "1uilEFwR5Sw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2018/3788/0/08546012", "title": "Action Recognition with Visual Attention on Skeleton Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08546012/17D45WwsQ53", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigdatase/2021/0038/0/003800a023", "title": "Two-stream Graph Attention Convolutional for Video Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/bigdatase/2021/003800a023/1BzUyAWp44w", "parentPublication": { "id": "proceedings/bigdatase/2021/0038/0", "title": "2021 IEEE 15th International Conference on Big Data Science and Engineering (BigDataSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859694", "title": "Structural Attention for Channel-Wise Adaptive Graph Convolution in Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859694/1G9EEMQjNLO", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956662", "title": "Temporal Shift and Attention Modules for Graphical Skeleton Action Recognition", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2022/09956662/1IHpKe3Q9Fu", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2023/4544/0/10042671", "title": "Part-aware Prototypical Graph Network for One-shot Skeleton-based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2023/10042671/1KOuXyhlfEI", "parentPublication": { "id": "proceedings/fg/2023/4544/0", "title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a826", "title": "Relational Network for Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a826/1cdOPT8DkTS", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b227", "title": "An Attention Enhanced Graph Convolutional LSTM Network for Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b227/1gyrM0FYz5e", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09321130", "title": "Spatiotemporal Co-Attention Recurrent Neural Networks for Human-Skeleton Motion Prediction", "doi": null, "abstractUrl": "/journal/tp/2022/06/09321130/1qkwzzV7Zug", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/isctt/2020/8575/0/857500a183", "title": "Human Skeleton Graph Attention Convolutional for Video Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/isctt/2020/857500a183/1rHeLWELQru", "parentPublication": { "id": "proceedings/isctt/2020/8575/0", "title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428403", "title": "Recurrent Graph Convolutional Autoencoder for Unsupervised Skeleton-Based Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428403/1uilAM4szPW", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvA1hvU", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "acronym": "iih-msp", "groupId": "1001543", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNAhxjEd", "doi": "10.1109/IIH-MSP.2009.171", "title": "Analysis of Gait Motion by Using Motion Capture in the Japanese Traditional Performing Arts", "normalizedTitle": "Analysis of Gait Motion by Using Motion Capture in the Japanese Traditional Performing Arts", "abstract": "The purpose of this research is to analysis feature parameters of Suriashi (sliding gait motion in Japanese traditional performing arts and Japanese traditional martial arts) with multivariate data analysis and to discriminate Suriashi movement among other gait motions using SVM (Support Vector Machine). Experiments were carried out using motion capture on the Suriashi of Japanese Traditional Dance and Noh and ordinary human gait. We can analyze the principal parameter to distinguish the Suriashi in various gait motions by conducting PCA (Principal Components Analysis) and cluster analysis of parameters of gait motion. It was found that gait motion of Suriashi can be recognized by using SVM to discriminate among the different gait motions. In addition, it is expected that our research will help practitioners and masters of Japanese Traditional Dance and Noh with Suriashi training through giving new information on Suriashi movements.", "abstracts": [ { "abstractType": "Regular", "content": "The purpose of this research is to analysis feature parameters of Suriashi (sliding gait motion in Japanese traditional performing arts and Japanese traditional martial arts) with multivariate data analysis and to discriminate Suriashi movement among other gait motions using SVM (Support Vector Machine). 
Experiments were carried out using motion capture on the Suriashi of Japanese Traditional Dance and Noh and ordinary human gait. We can analyze the principal parameter to distinguish the Suriashi in various gait motions by conducting PCA (Principal Components Analysis) and cluster analysis of parameters of gait motion. It was found that gait motion of Suriashi can be recognized by using SVM to discriminate among the different gait motions. In addition, it is expected that our research will help practitioners and masters of Japanese Traditional Dance and Noh with Suriashi training through giving new information on Suriashi movements.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The purpose of this research is to analysis feature parameters of Suriashi (sliding gait motion in Japanese traditional performing arts and Japanese traditional martial arts) with multivariate data analysis and to discriminate Suriashi movement among other gait motions using SVM (Support Vector Machine). Experiments were carried out using motion capture on the Suriashi of Japanese Traditional Dance and Noh and ordinary human gait. We can analyze the principal parameter to distinguish the Suriashi in various gait motions by conducting PCA (Principal Components Analysis) and cluster analysis of parameters of gait motion. It was found that gait motion of Suriashi can be recognized by using SVM to discriminate among the different gait motions. 
In addition, it is expected that our research will help practitioners and masters of Japanese Traditional Dance and Noh with Suriashi training through giving new information on Suriashi movements.", "fno": "3762b164", "keywords": [ "Suriashi", "Motion Data", "Feature Parameter", "Principal Components Analysis", "Support Vector Machine" ], "authors": [ { "affiliation": null, "fullName": "Woong Choi", "givenName": "Woong", "surname": "Choi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hiroyuki Sekiguchi", "givenName": "Hiroyuki", "surname": "Sekiguchi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kozaburo Hachimura", "givenName": "Kozaburo", "surname": "Hachimura", "__typename": "ArticleAuthorType" } ], "idPrefix": "iih-msp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-09-01T00:00:00", "pubType": "proceedings", "pages": "1164-1167", "year": "2009", "issn": null, "isbn": "978-0-7695-3762-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3762b160", "articleId": "12OmNqI04Kd", "__typename": "AdjacentArticleType" }, "next": { "fno": "3762b168", "articleId": "12OmNBSSVmb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iih-msp/2009/3762/0/3762b144", "title": "A Retrieval System for Ballet Steps Using Three-dimensional Motion Data", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2009/3762b144/12OmNA0MZ2f", "parentPublication": { "id": "proceedings/iih-msp/2009/3762/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2014/4677/0/4677a071", "title": "Feasibility Study for Contemporary Dance E-Learning: An Interactive Creation 
Support System Using 3D Motion Data", "doi": null, "abstractUrl": "/proceedings-article/cw/2014/4677a071/12OmNBaBuQH", "parentPublication": { "id": "proceedings/cw/2014/4677/0", "title": "2014 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/humo/2000/0939/0/09390003", "title": "Phase in model-free perception of gait", "doi": null, "abstractUrl": "/proceedings-article/humo/2000/09390003/12OmNCm7BER", "parentPublication": { "id": "proceedings/humo/2000/0939/0", "title": "Human Motion, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813392", "title": "Gait Analysis using Independent Components of image motion", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813392/12OmNwBT1m4", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2010/4263/2/4263b331", "title": "Discovery of Gait Anomalies from Motion Sensor Data", "doi": null, "abstractUrl": "/proceedings-article/ictai/2010/4263b331/12OmNwdtwdM", "parentPublication": { "id": "proceedings/ictai/2010/4263/2", "title": "2010 22nd IEEE International Conference on Tools with Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2006/2503/0/25030579", "title": "Gait Recognition by Two-Stage Principal Component Analysis", "doi": null, "abstractUrl": "/proceedings-article/fg/2006/25030579/12OmNweTvMq", "parentPublication": { "id": "proceedings/fg/2006/2503/0", "title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cw/2012/4814/0/4814a045", "title": "Development of Easy-to-Use Authoring System for Noh (Japanese Traditional) Dance Animation", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a045/12OmNxaNGmE", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446498", "title": "Performance-Driven Dance Motion Control of a Virtual Partner Character", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446498/13bd1fdV4lU", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/03/ttg2012030501", "title": "Example-Based Automatic Music-Driven Conventional Dance Motion Synthesis", "doi": null, "abstractUrl": "/journal/tg/2012/03/ttg2012030501/13rRUwwaKt6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a084", "title": "Real-Time Gait Reconstruction For Virtual Reality Using a Single Sensor", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a084/1pBMjFD8jVm", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNywfKys", "title": "Pattern Recognition, International Conference on", "acronym": "icpr", "groupId": "1000545", "volume": "3", "displayVolume": "3", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNApcuzs", "doi": "10.1109/ICPR.2002.1047975", "title": "Human Motion Signatures: Analysis, Synthesis, Recognition", "normalizedTitle": "Human Motion Signatures: Analysis, Synthesis, Recognition", "abstract": "Human motion is the composite consequence of multiple elements, including the action performed and a motion signature that captures the distinctive pattern of movement of a particular individual. We develop a new algorithm that is capable of extracting these motion elements and recombining them in novel ways. The algorithm analyzes motion data spanning multiple subjects performing different actions. The analysis yields a generative motion model that can synthesize new motions in the distinctive styles of these individuals. Our algorithms can also recognize people and actions from new motions by comparing motion signatures and action parameters.", "abstracts": [ { "abstractType": "Regular", "content": "Human motion is the composite consequence of multiple elements, including the action performed and a motion signature that captures the distinctive pattern of movement of a particular individual. We develop a new algorithm that is capable of extracting these motion elements and recombining them in novel ways. The algorithm analyzes motion data spanning multiple subjects performing different actions. The analysis yields a generative motion model that can synthesize new motions in the distinctive styles of these individuals. 
Our algorithms can also recognize people and actions from new motions by comparing motion signatures and action parameters.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Human motion is the composite consequence of multiple elements, including the action performed and a motion signature that captures the distinctive pattern of movement of a particular individual. We develop a new algorithm that is capable of extracting these motion elements and recombining them in novel ways. The algorithm analyzes motion data spanning multiple subjects performing different actions. The analysis yields a generative motion model that can synthesize new motions in the distinctive styles of these individuals. Our algorithms can also recognize people and actions from new motions by comparing motion signatures and action parameters.", "fno": "169530456", "keywords": [], "authors": [ { "affiliation": "University of Toronto", "fullName": "M. Alex O. Vasilescu", "givenName": "M. Alex O.", "surname": "Vasilescu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-08-01T00:00:00", "pubType": "proceedings", "pages": "30456", "year": "2002", "issn": "1051-4651", "isbn": "0-7695-1695-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "169530452", "articleId": "12OmNC4eSFi", "__typename": "AdjacentArticleType" }, "next": { "fno": "169530461", "articleId": "12OmNqJHFKq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icassp/2002/7402/4/05745425", "title": "Independent component analysis and synthesis of human motion", "doi": null, "abstractUrl": "/proceedings-article/icassp/2002/05745425/12OmNAle6zH", "parentPublication": { "id": "proceedings/icassp/2002/7402/4", "title": "Proceedings of International 
Conference on Acoustics, Speech and Signal Processing (CASSP'02)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2005/9331/0/01521674", "title": "A Study of Synthesizing New Human Motions from Sampled Motions Using Tensor Decomposition", "doi": null, "abstractUrl": "/proceedings-article/icme/2005/01521674/12OmNvT2p9Q", "parentPublication": { "id": "proceedings/icme/2005/9331/0", "title": "2005 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2015/7673/0/7673a193", "title": "Synthesis and Editing of Human Motion with Generative Human Motion Model", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2015/7673a193/12OmNyYm2oB", "parentPublication": { "id": "proceedings/icvrv/2015/7673/0", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/05/06626306", "title": "Learning Actionlet Ensemble for 3D Human Action Recognition", "doi": null, "abstractUrl": "/journal/tp/2014/05/06626306/13rRUwbs1TK", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2021/3176/0/09666948", "title": "Skeleton-based Action Recognition for Human-Robot Interaction using Self-Attention Mechanism", "doi": null, "abstractUrl": "/proceedings-article/fg/2021/09666948/1A6BxEU8Rpu", "parentPublication": { "id": "proceedings/fg/2021/3176/0", "title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200k0965", "title": "Action-Conditioned 3D Human Motion Synthesis with 
Transformer VAE", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200k0965/1BmIZHGnurS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0428", "title": "Towards Diverse and Natural Scene-aware 3D Human Motion Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0428/1H0KwFYcMFi", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3833", "title": "Programmatic Concept Learning for Human Motion Description and Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3833/1H1j6vxewQE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i141", "title": "Weakly-supervised Action Transition Learning for Stochastic Human Motion Prediction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i141/1H1mW0pifSM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900b276", "title": "Self-supervised Motion Learning from Static Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900b276/1yeLYOoH3Hy", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern 
Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwt5sgJ", "title": "CVPR 2011", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNrHB1Qj", "doi": "10.1109/CVPR.2011.5995424", "title": "Markerless motion capture of interacting characters using multi-view image segmentation", "normalizedTitle": "Markerless motion capture of interacting characters using multi-view image segmentation", "abstract": "We present a markerless motion capture approach that reconstructs the skeletal motion and detailed time-varying surface geometry of two closely interacting people from multi-view video. Due to ambiguities in feature-to-person assignments and frequent occlusions, it is not feasible to directly apply single-person capture approaches to the multi-person case. We therefore propose a combined image segmentation and tracking approach to overcome these difficulties. A new probabilistic shape and appearance model is employed to segment the input images and to assign each pixel uniquely to one person. Thereafter, a single-person markerless motion and surface capture approach can be applied to each individual, either one-by-one or in parallel, even under strong occlusions. We demonstrate the performance of our approach on several challenging multi-person motions, including dance and martial arts, and also provide a reference dataset for multi-person motion capture with ground truth.", "abstracts": [ { "abstractType": "Regular", "content": "We present a markerless motion capture approach that reconstructs the skeletal motion and detailed time-varying surface geometry of two closely interacting people from multi-view video. Due to ambiguities in feature-to-person assignments and frequent occlusions, it is not feasible to directly apply single-person capture approaches to the multi-person case. We therefore propose a combined image segmentation and tracking approach to overcome these difficulties. 
A new probabilistic shape and appearance model is employed to segment the input images and to assign each pixel uniquely to one person. Thereafter, a single-person markerless motion and surface capture approach can be applied to each individual, either one-by-one or in parallel, even under strong occlusions. We demonstrate the performance of our approach on several challenging multi-person motions, including dance and martial arts, and also provide a reference dataset for multi-person motion capture with ground truth.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a markerless motion capture approach that reconstructs the skeletal motion and detailed time-varying surface geometry of two closely interacting people from multi-view video. Due to ambiguities in feature-to-person assignments and frequent occlusions, it is not feasible to directly apply single-person capture approaches to the multi-person case. We therefore propose a combined image segmentation and tracking approach to overcome these difficulties. A new probabilistic shape and appearance model is employed to segment the input images and to assign each pixel uniquely to one person. Thereafter, a single-person markerless motion and surface capture approach can be applied to each individual, either one-by-one or in parallel, even under strong occlusions. 
We demonstrate the performance of our approach on several challenging multi-person motions, including dance and martial arts, and also provide a reference dataset for multi-person motion capture with ground truth.", "fno": "05995424", "keywords": [ "Multiperson Motion Capture", "Markerless Motion Capture", "Interacting Characters", "Multi View Image Segmentation", "Skeletal Motion", "Detailed Time Varying Surface Geometry", "Multiview Video", "Probabilistic Shape", "Occlusions" ], "authors": [ { "affiliation": null, "fullName": "Yebin Liu", "givenName": null, "surname": "Yebin Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "C. Stoll", "givenName": "C.", "surname": "Stoll", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "J. Gall", "givenName": "J.", "surname": "Gall", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "H-P Seidel", "givenName": "H-P", "surname": "Seidel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "C. 
Theobalt", "givenName": "C.", "surname": "Theobalt", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-06-01T00:00:00", "pubType": "proceedings", "pages": "1249-1256", "year": "2011", "issn": null, "isbn": "978-1-4577-0394-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05995423", "articleId": "12OmNxWLTAd", "__typename": "AdjacentArticleType" }, "next": { "fno": "05995425", "articleId": "12OmNAlvHpT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dicta/2008/3456/0/3456a420", "title": "Multi-view Human Motion Capture with an Improved Deformation Skin Model", "doi": null, "abstractUrl": "/proceedings-article/dicta/2008/3456a420/12OmNB1eJyX", "parentPublication": { "id": "proceedings/dicta/2008/3456/0", "title": "2008 Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995582", "title": "Functional categorization of objects using real-time markerless motion capture", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995582/12OmNB7LvzQ", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmai/2007/2901/0/29010091", "title": "Exploring Motion Sequence of Virtual Characters: Experimenting Motion Capture Variables", "doi": null, "abstractUrl": "/proceedings-article/gmai/2007/29010091/12OmNButq0m", "parentPublication": { "id": "proceedings/gmai/2007/2901/0", "title": "2007 Geometric Modeling and Imaging: New Advances", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2009/3992/0/05206859", "title": "Markerless Motion Capture with unsynchronized moving cameras", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206859/12OmNqI04JK", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270153", "title": "Scaled Motion Dynamics for Markerless Motion Capture", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270153/12OmNxxvAPo", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cviie/2005/2524/0/25240015", "title": "Markerless Motion Capture using Multiple Cameras", "doi": null, "abstractUrl": "/proceedings-article/cviie/2005/25240015/12OmNxymo7V", "parentPublication": { "id": "proceedings/cviie/2005/2524/0", "title": "Computer Vision for Interactive and Intelligent Environment", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587520", "title": "Markerless motion capture of man-machine interaction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587520/12OmNyLiuzH", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdip/2009/3565/0/3565a112", "title": "A New Algorithm for Human Motion Capture via 3D Active Contours", "doi": null, "abstractUrl": "/proceedings-article/icdip/2009/3565a112/12OmNzgNXZU", "parentPublication": { "id": "proceedings/icdip/2009/3565/0", "title": "Digital Image Processing, International Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/11/ttp2013112720", "title": "Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation", "doi": null, "abstractUrl": "/journal/tp/2013/11/ttp2013112720/13rRUxDqS9B", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a823", "title": "Markerless Outdoor Human Motion Capture Using Multiple Autonomous Micro Aerial Vehicles", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a823/1hQqk33280w", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx0A7K1", "title": "Face and Gesture 2011", "acronym": "fg", "groupId": "1000065", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNviZlz1", "doi": "10.1109/FG.2011.5771384", "title": "Realistic head motion synthesis for an image-based talking head", "normalizedTitle": "Realistic head motion synthesis for an image-based talking head", "abstract": "In this paper, we present a novel approach to add flexible head motions to talking heads. First, head motion patterns are collected from original recordings. These head motion patterns are recorded video segments with different head motions, like nod and shake. The head motion is synthesized by selecting and concatenating appropriate head motion patterns according to the input text or head motion tags. In order to join these patterns, optical flow based morphing is used to smooth transitions without introducing noticeable discontinuities. Experimental results show that head motion synthesis is realistic, and animations with flexible head motions are rated with a higher average mean opinion score than the ones with repeated head motions.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a novel approach to add flexible head motions to talking heads. First, head motion patterns are collected from original recordings. These head motion patterns are recorded video segments with different head motions, like nod and shake. The head motion is synthesized by selecting and concatenating appropriate head motion patterns according to the input text or head motion tags. In order to join these patterns, optical flow based morphing is used to smooth transitions without introducing noticeable discontinuities. 
Experimental results show that head motion synthesis is realistic, and animations with flexible head motions are rated with a higher average mean opinion score than the ones with repeated head motions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a novel approach to add flexible head motions to talking heads. First, head motion patterns are collected from original recordings. These head motion patterns are recorded video segments with different head motions, like nod and shake. The head motion is synthesized by selecting and concatenating appropriate head motion patterns according to the input text or head motion tags. In order to join these patterns, optical flow based morphing is used to smooth transitions without introducing noticeable discontinuities. Experimental results show that head motion synthesis is realistic, and animations with flexible head motions are rated with a higher average mean opinion score than the ones with repeated head motions.", "fno": "05771384", "keywords": [ "Head", "Magnetic Heads", "Databases", "Animation", "Interpolation", "Optical Imaging", "Adaptive Optics" ], "authors": [ { "affiliation": "Institut f ¨ur Informationsverarbeitung, Leibniz Universit¨at Hannover, Appelstr. 9A 30167 Hannover, Germany", "fullName": "Kang Liu", "givenName": "Kang", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Institut f ¨ur Informationsverarbeitung, Leibniz Universit¨at Hannover, Appelstr. 
9A 30167 Hannover, Germany", "fullName": "Joern Ostermann", "givenName": "Joern", "surname": "Ostermann", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "125-130", "year": "2011", "issn": null, "isbn": "978-1-4244-9140-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05771383", "articleId": "12OmNqNXEmD", "__typename": "AdjacentArticleType" }, "next": { "fno": "05771386", "articleId": "12OmNwE9Om7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2017/0563/0/08273607", "title": "Perceptual enhancement of emotional mocap head motion: An experimental study", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273607/12OmNBhpSbx", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2013/11/0/06728902", "title": "Flying head: A head-synchronization mechanism for flying telepresence", "doi": null, "abstractUrl": "/proceedings-article/icat/2013/06728902/12OmNwHhoOM", "parentPublication": { "id": "proceedings/icat/2013/11/0", "title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549379", "title": "Head motion animation using avatar gaze space", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/2/73102591", "title": "A new frame interpolation scheme for talking head sequences", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73102591/12OmNypIYye", "parentPublication": { "id": "proceedings/icip/1995/7310/2", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811014", "title": "Natural Eye Motion Synthesis by Modeling Gaze-Head Coupling", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811014/12OmNzC5T34", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771401", "title": "Realistic head motion synthesis for an image-based talking head", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771401/12OmNzVoBzX", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011835", "title": "Realistic facial expression synthesis for an image-based talking head", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011835/12OmNzcxYWX", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10061572", "title": "Free-HeadGAN: Neural Talking Head Synthesis with Explicit Gaze Control", "doi": null, "abstractUrl": "/journal/tp/5555/01/10061572/1Lk2C6ZD2zC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icdi3c/2021/2569/0/256900a041", "title": "Development of Head Motion Controlled Wheelchair", "doi": null, "abstractUrl": "/proceedings-article/icdi3c/2021/256900a041/1xeWFm36bXa", "parentPublication": { "id": "proceedings/icdi3c/2021/2569/0", "title": "2021 International Conference on Design Innovations for 3Cs Compute Communicate Control (ICDI3C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0034", "title": "One-Shot Free-View Neural Talking-Head Synthesis for Video Conferencing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0034/1yeKYgqBB3W", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNro0Ib0", "title": "Computer Graphics and Applications, Pacific Conference on", "acronym": "pg", "groupId": "1000130", "volume": "0", "displayVolume": "0", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNyo1nRs", "doi": "10.1109/PCCGA.2004.1348342", "title": "Personalised Real-Time Idle Motion Synthesis", "normalizedTitle": "Personalised Real-Time Idle Motion Synthesis", "abstract": "In this paper, we propose a novel animation approach based on Principal Component Analysis that allows generating two layers of subtle motions: small posture variations and personalised change of balance. Such a motion generator is needed in many cases when one attempts to create an animation sequence out of a set of existing clips. In nature there exists no motionless character, while in computer animation we often encounter cases where no planned actions, such as waiting for another actor finishing his/her part, is implemented as a stop/frozen animation. We identify many situations where a flexible idle motion generator can help: from synchronisation of speech/body animation duration, to dynamic creation of stand still variations in between two active plays. Our approach overcomes the limitations of using a small set of existing clips as a basis for synthesizing idle motions, such as unnatural repetition of movements and difficulties to insert idle motions into an animation without breaking its continuity. A realistic animation is obtained by blending small posture variations with personalised balance shifting animations.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a novel animation approach based on Principal Component Analysis that allows generating two layers of subtle motions: small posture variations and personalised change of balance. Such a motion generator is needed in many cases when one attempts to create an animation sequence out of a set of existing clips. 
In nature there exists no motionless character, while in computer animation we often encounter cases where no planned actions, such as waiting for another actor finishing his/her part, is implemented as a stop/frozen animation. We identify many situations where a flexible idle motion generator can help: from synchronisation of speech/body animation duration, to dynamic creation of stand still variations in between two active plays. Our approach overcomes the limitations of using a small set of existing clips as a basis for synthesizing idle motions, such as unnatural repetition of movements and difficulties to insert idle motions into an animation without breaking its continuity. A realistic animation is obtained by blending small posture variations with personalised balance shifting animations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a novel animation approach based on Principal Component Analysis that allows generating two layers of subtle motions: small posture variations and personalised change of balance. Such a motion generator is needed in many cases when one attempts to create an animation sequence out of a set of existing clips. In nature there exists no motionless character, while in computer animation we often encounter cases where no planned actions, such as waiting for another actor finishing his/her part, is implemented as a stop/frozen animation. We identify many situations where a flexible idle motion generator can help: from synchronisation of speech/body animation duration, to dynamic creation of stand still variations in between two active plays. Our approach overcomes the limitations of using a small set of existing clips as a basis for synthesizing idle motions, such as unnatural repetition of movements and difficulties to insert idle motions into an animation without breaking its continuity. 
A realistic animation is obtained by blending small posture variations with personalised balance shifting animations.", "fno": "22340121", "keywords": [], "authors": [ { "affiliation": "MIRALab - University of Geneva, Switzerland", "fullName": "Arjan Egges", "givenName": "Arjan", "surname": "Egges", "__typename": "ArticleAuthorType" }, { "affiliation": "MIRALab - University of Geneva, Switzerland", "fullName": "Tom Molet", "givenName": "Tom", "surname": "Molet", "__typename": "ArticleAuthorType" }, { "affiliation": "MIRALab - University of Geneva, Switzerland", "fullName": "Nadia Magnenat-Thalmann", "givenName": "Nadia", "surname": "Magnenat-Thalmann", "__typename": "ArticleAuthorType" } ], "idPrefix": "pg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-10-01T00:00:00", "pubType": "proceedings", "pages": "121-130", "year": "2004", "issn": "1550-4085", "isbn": "0-7695-2234-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "22340111", "articleId": "12OmNBO3Kil", "__typename": "AdjacentArticleType" }, "next": { "fno": "22340133", "articleId": "12OmNrAv3GQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/alpit/2007/2930/0/2930a253", "title": "Real-Time Motion Simulation of Artificial Fish for Virtual Marine World", "doi": null, "abstractUrl": "/proceedings-article/alpit/2007/2930a253/12OmNqI04LB", "parentPublication": { "id": "proceedings/alpit/2007/2930/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1991/2163/0/00131687", "title": "Real-time motion scheduling for a SMALL workcell", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131687/12OmNvHoQoX", "parentPublication": { "id": 
"proceedings/robot/1991/2163/0", "title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a200", "title": "Automatic Composition by Body-Part Motion Synthesis for Supporting Dance Creation", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a200/12OmNyOq55Y", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2015/7673/0/7673a193", "title": "Synthesis and Editing of Human Motion with Generative Human Motion Model", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2015/7673a193/12OmNyYm2oB", "parentPublication": { "id": "proceedings/icvrv/2015/7673/0", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2008/3456/0/3456a392", "title": "Interactive Motion Browse and Synthesis from Unorganized Motion Data Set", "doi": null, "abstractUrl": "/proceedings-article/dicta/2008/3456a392/12OmNzBwGEu", "parentPublication": { "id": "proceedings/dicta/2008/3456/0", "title": "2008 Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a309", "title": "Controllable Variation Synthesis for Surface Motion Capture", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a309/12OmNzTppDP", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2005/02/mcg2005020024", "title": "Automated Eye Motion Using Texture 
Synthesis", "doi": null, "abstractUrl": "/magazine/cg/2005/02/mcg2005020024/13rRUwInvLR", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1997/06/mcg1997060039", "title": "Interpolation Synthesis of Articulated Figure Motion", "doi": null, "abstractUrl": "/magazine/cg/1997/06/mcg1997060039/13rRUyeTVki", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g583", "title": "Style-ERD: Responsive and Coherent Online Motion Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g583/1H0NZuIQvyE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900n3607", "title": "Autoregressive Stylized Motion Synthesis with Generative Flow", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900n3607/1yeIFQTwlXO", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx0A7K1", "title": "Face and Gesture 2011", "acronym": "fg", "groupId": "1000065", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNzVoBzX", "doi": "10.1109/FG.2011.5771401", "title": "Realistic head motion synthesis for an image-based talking head", "normalizedTitle": "Realistic head motion synthesis for an image-based talking head", "abstract": "In this paper, we present a novel approach to add flexible head motions to talking heads. First, head motion patterns are collected from original recordings. These head motion patterns are recorded video segments with different head motions, like nod and shake. The head motion is synthesized by selecting and concatenating appropriate head motion patterns according to the input text or head motion tags. In order to join these patterns, optical flow based morphing is used to smooth transitions without introducing noticeable discontinuities. Experimental results show that head motion synthesis is realistic, and animations with flexible head motions are rated with a higher average mean opinion score than the ones with repeated head motions.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a novel approach to add flexible head motions to talking heads. First, head motion patterns are collected from original recordings. These head motion patterns are recorded video segments with different head motions, like nod and shake. The head motion is synthesized by selecting and concatenating appropriate head motion patterns according to the input text or head motion tags. In order to join these patterns, optical flow based morphing is used to smooth transitions without introducing noticeable discontinuities. 
Experimental results show that head motion synthesis is realistic, and animations with flexible head motions are rated with a higher average mean opinion score than the ones with repeated head motions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a novel approach to add flexible head motions to talking heads. First, head motion patterns are collected from original recordings. These head motion patterns are recorded video segments with different head motions, like nod and shake. The head motion is synthesized by selecting and concatenating appropriate head motion patterns according to the input text or head motion tags. In order to join these patterns, optical flow based morphing is used to smooth transitions without introducing noticeable discontinuities. Experimental results show that head motion synthesis is realistic, and animations with flexible head motions are rated with a higher average mean opinion score than the ones with repeated head motions.", "fno": "05771401", "keywords": [ "Computer Animation", "Image Sequences", "Realistic Head Motion Synthesis", "Image Based Talking Head", "Head Motion Patterns", "Optical Flow Based Morphing", "Animations", "Mean Opinion Score", "Head", "Magnetic Heads", "Databases", "Animation", "Interpolation", "Optical Imaging", "Adaptive Optics" ], "authors": [ { "affiliation": "Institut für Informationsverarbeitung, Leibniz Universität Hannover, Appelstr. 9A 30167 Hannover, Germany", "fullName": "Kang Liu", "givenName": "Kang", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Institut für Informationsverarbeitung, Leibniz Universität Hannover, Appelstr. 
9A 30167 Hannover, Germany", "fullName": "Joern Ostermann", "givenName": "Joern", "surname": "Ostermann", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "221-226", "year": "2011", "issn": null, "isbn": "978-1-4244-9140-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05771400", "articleId": "12OmNButpYT", "__typename": "AdjacentArticleType" }, "next": { "fno": "05771402", "articleId": "12OmNzAoi0j", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2017/0563/0/08273607", "title": "Perceptual enhancement of emotional mocap head motion: An experimental study", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273607/12OmNBhpSbx", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771384", "title": "Realistic head motion synthesis for an image-based talking head", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771384/12OmNviZlz1", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2013/11/0/06728902", "title": "Flying head: A head-synchronization mechanism for flying telepresence", "doi": null, "abstractUrl": "/proceedings-article/icat/2013/06728902/12OmNwHhoOM", "parentPublication": { "id": "proceedings/icat/2013/11/0", "title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549379", "title": "Head motion animation using avatar gaze space", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/2/73102591", "title": "A new frame interpolation scheme for talking head sequences", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73102591/12OmNypIYye", "parentPublication": { "id": "proceedings/icip/1995/7310/2", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811014", "title": "Natural Eye Motion Synthesis by Modeling Gaze-Head Coupling", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811014/12OmNzC5T34", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011835", "title": "Realistic facial expression synthesis for an image-based talking head", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011835/12OmNzcxYWX", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10061572", "title": "Free-HeadGAN: Neural Talking Head Synthesis with Explicit Gaze Control", "doi": null, "abstractUrl": "/journal/tp/5555/01/10061572/1Lk2C6ZD2zC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icdi3c/2021/2569/0/256900a041", "title": "Development of Head Motion Controlled Wheelchair", "doi": null, "abstractUrl": "/proceedings-article/icdi3c/2021/256900a041/1xeWFm36bXa", "parentPublication": { "id": "proceedings/icdi3c/2021/2569/0", "title": "2021 International Conference on Design Innovations for 3Cs Compute Communicate Control (ICDI3C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0034", "title": "One-Shot Free-View Neural Talking-Head Synthesis for Video Conferencing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0034/1yeKYgqBB3W", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1fdV4lU", "doi": "10.1109/VR.2018.8446498", "title": "Performance-Driven Dance Motion Control of a Virtual Partner Character", "normalizedTitle": "Performance-Driven Dance Motion Control of a Virtual Partner Character", "abstract": "Taking advantage of motion capture and display technologies, a method giving a user the ability to control the dance motions of a virtual partner in an immersive setup was developed and is presented in this paper. The method utilizes a dance motion dataset containing the motion of both dancers (leader and partner). A hidden Markov model (HMM) was used to learn the structure of the dance motions. The HMM was trained on the motion of a chosen dancer (leader or partner), and during runtime, the system predicts the progress of the chosen dance motion, which corresponds to the progress of the user's motion. The regular structure of the HMM was extended by utilizing a jump state transition, allowing the user to improvise dance motions during the runtime. Since the jump state addition increases the model's complexity, an effort was made to optimize the prediction process to ensure runtime efficiency. A few corrective steps were also implemented to ensure the partner character's motions appear natural. A user study was conducted to understand the naturalness of the synthesized motion as well as the control that the user has on the partner character's synthesized motion.", "abstracts": [ { "abstractType": "Regular", "content": "Taking advantage of motion capture and display technologies, a method giving a user the ability to control the dance motions of a virtual partner in an immersive setup was developed and is presented in this paper. 
The method utilizes a dance motion dataset containing the motion of both dancers (leader and partner). A hidden Markov model (HMM) was used to learn the structure of the dance motions. The HMM was trained on the motion of a chosen dancer (leader or partner), and during runtime, the system predicts the progress of the chosen dance motion, which corresponds to the progress of the user's motion. The regular structure of the HMM was extended by utilizing a jump state transition, allowing the user to improvise dance motions during the runtime. Since the jump state addition increases the model's complexity, an effort was made to optimize the prediction process to ensure runtime efficiency. A few corrective steps were also implemented to ensure the partner character's motions appear natural. A user study was conducted to understand the naturalness of the synthesized motion as well as the control that the user has on the partner character's synthesized motion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Taking advantage of motion capture and display technologies, a method giving a user the ability to control the dance motions of a virtual partner in an immersive setup was developed and is presented in this paper. The method utilizes a dance motion dataset containing the motion of both dancers (leader and partner). A hidden Markov model (HMM) was used to learn the structure of the dance motions. The HMM was trained on the motion of a chosen dancer (leader or partner), and during runtime, the system predicts the progress of the chosen dance motion, which corresponds to the progress of the user's motion. The regular structure of the HMM was extended by utilizing a jump state transition, allowing the user to improvise dance motions during the runtime. Since the jump state addition increases the model's complexity, an effort was made to optimize the prediction process to ensure runtime efficiency. 
A few corrective steps were also implemented to ensure the partner character's motions appear natural. A user study was conducted to understand the naturalness of the synthesized motion as well as the control that the user has on the partner character's synthesized motion.", "fno": "08446498", "keywords": [ "Hidden Markov Models", "Humanities", "Image Motion Analysis", "Motion Control", "Virtual Partner Character", "Motion Capture", "Display Technologies", "HMM", "Dance Motion Control", "Hidden Markov Model", "State Transition", "Hidden Markov Models", "Robot Kinematics", "Virtual Reality", "Training", "Real Time Systems", "Runtime", "Human Centered Computing Interaction Paradigms Virtual Reality", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality", "Computing Methodologies Computer Graphics Animation Motion Capture" ], "authors": [ { "affiliation": "Southern Illinois University, Graphics & Entertainment Technology Lab, Carbondale, IL 62901, U.S.A.", "fullName": "Christos Mousas", "givenName": "Christos", "surname": "Mousas", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "57-64", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446295", "articleId": "13bd1ftOBDn", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446546", "articleId": "13bd1tl2om1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2012/1611/0/06238894", "title": "Real-time body motion analysis for dance pattern recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06238894/12OmNBIFmrn", "parentPublication": { "id": 
"proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284997", "title": "Multicamera Audio-Visual Analysis of Dance Figures", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284997/12OmNBscCWD", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2009/3791/0/3791a171", "title": "Automatic Composition for Contemporary Dance Using 3D Motion Clips: Experiment on Dance Training and System Evaluation", "doi": null, "abstractUrl": "/proceedings-article/cw/2009/3791a171/12OmNwEJ0HF", "parentPublication": { "id": "proceedings/cw/2009/3791/0", "title": "2009 International Conference on CyberWorlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2014/6636/0/6636a088", "title": "Analysis and Design of Humanoid Robot Dance", "doi": null, "abstractUrl": "/proceedings-article/icicta/2014/6636a088/12OmNwpoFMM", "parentPublication": { "id": "proceedings/icicta/2014/6636/0", "title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2011/1451/0/06000356", "title": "Procedural generation of Cuban dance motion", "doi": null, "abstractUrl": "/proceedings-article/cgames/2011/06000356/12OmNxHrylJ", "parentPublication": { "id": "proceedings/cgames/2011/1451/0", "title": "2011 16th International Conference on Computer Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2004/2122/0/21220857", "title": "Detecting Dance Motion 
Structure through Music Analysis", "doi": null, "abstractUrl": "/proceedings-article/fg/2004/21220857/12OmNxjjEdW", "parentPublication": { "id": "proceedings/fg/2004/2122/0", "title": "Sixth IEEE International Conference on Automatic Face and Gesture Recognition, 2004. Proceedings.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a200", "title": "Automatic Composition by Body-Part Motion Synthesis for Supporting Dance Creation", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a200/12OmNyOq55Y", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/03/ttg2012030501", "title": "Example-Based Automatic Music-Driven Conventional Dance Motion Synthesis", "doi": null, "abstractUrl": "/journal/tg/2012/03/ttg2012030501/13rRUwwaKt6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10018173", "title": "Keyframe Control of Music-driven 3D Dance Generation", "doi": null, "abstractUrl": "/journal/tg/5555/01/10018173/1JYZ6TXyjgk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864530", "title": "Immersive Simulation and Training of Person-to-3D Character Dance in Real-Time", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864530/1e5ZqMvxwiY", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRk", "title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on", "acronym": "prni", "groupId": "1800428", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNwfKjcT", "doi": "10.1109/PRNI.2012.34", "title": "Classification and Visualization of Multiclass fMRI Data Using Supervised Self-Organizing Maps", "normalizedTitle": "Classification and Visualization of Multiclass fMRI Data Using Supervised Self-Organizing Maps", "abstract": "So far, most fMRI studies that analyzed voxel activity patterns of more than two conditions transformed the multiclass problem into a series of binary problems. Furthermore, visualizations of the topology of underlying representations are usually not presented. Here, we explore the feasibility of different types of supervised self-organizing maps (SSOM) to decode and visualize voxel patterns of fMRI datasets consisting of multiple conditions. Our results suggest that - compared to commonly applied classification approaches - SSOMs are well suited when activity patterns consist of a small number of features (e.g. as in searchlight- or region of interest- based approaches). In addition, we demonstrate the utility of using SOM grids for intuitive and exploratory visualization of topological relations among classes of fMRI activity patterns.", "abstracts": [ { "abstractType": "Regular", "content": "So far, most fMRI studies that analyzed voxel activity patterns of more than two conditions transformed the multiclass problem into a series of binary problems. Furthermore, visualizations of the topology of underlying representations are usually not presented. Here, we explore the feasibility of different types of supervised self-organizing maps (SSOM) to decode and visualize voxel patterns of fMRI datasets consisting of multiple conditions. 
Our results suggest that - compared to commonly applied classification approaches - SSOMs are well suited when activity patterns consist of a small number of features (e.g. as in searchlight- or region of interest- based approaches). In addition, we demonstrate the utility of using SOM grids for intuitive and exploratory visualization of topological relations among classes of fMRI activity patterns.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "So far, most fMRI studies that analyzed voxel activity patterns of more than two conditions transformed the multiclass problem into a series of binary problems. Furthermore, visualizations of the topology of underlying representations are usually not presented. Here, we explore the feasibility of different types of supervised self-organizing maps (SSOM) to decode and visualize voxel patterns of fMRI datasets consisting of multiple conditions. Our results suggest that - compared to commonly applied classification approaches - SSOMs are well suited when activity patterns consist of a small number of features (e.g. as in searchlight- or region of interest- based approaches). 
In addition, we demonstrate the utility of using SOM grids for intuitive and exploratory visualization of topological relations among classes of fMRI activity patterns.", "fno": "4765a065", "keywords": [ "Self Organizing Maps", "F MRI", "Decoding", "Multiclass Classification" ], "authors": [ { "affiliation": null, "fullName": "Lars Haufeld", "givenName": "Lars", "surname": "Haufeld", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Roberta Santoro", "givenName": "Roberta", "surname": "Santoro", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Giancarlo Valente", "givenName": "Giancarlo", "surname": "Valente", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Elia Formisano", "givenName": "Elia", "surname": "Formisano", "__typename": "ArticleAuthorType" } ], "idPrefix": "prni", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-07-01T00:00:00", "pubType": "proceedings", "pages": "65-68", "year": "2012", "issn": null, "isbn": "978-1-4673-2182-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4765a061", "articleId": "12OmNwE9ODq", "__typename": "AdjacentArticleType" }, "next": { "fno": "4765a069", "articleId": "12OmNBW0vBf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/prni/2011/4399/0/4399a053", "title": "Exploring Whole Brain fMRI Data with Unsupervised Artificial Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/prni/2011/4399a053/12OmNAS9zI5", "parentPublication": { "id": "proceedings/prni/2011/4399/0", "title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prni/2012/4765/0/4765a101", "title": "Connectivity-informed Sparse Classifiers for 
fMRI Brain Decoding", "doi": null, "abstractUrl": "/proceedings-article/prni/2012/4765a101/12OmNBO3JVE", "parentPublication": { "id": "proceedings/prni/2012/4765/0", "title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwcdm/2011/4585/0/4585a047", "title": "Classifying Judging States from fMRI Data of Visual Recognition Task", "doi": null, "abstractUrl": "/proceedings-article/iwcdm/2011/4585a047/12OmNrAMEJN", "parentPublication": { "id": "proceedings/iwcdm/2011/4585/0", "title": "Complexity and Data Mining, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2008/3497/2/3497b567", "title": "Discriminate Brain States from fMRI Images Using Fuzzy Support Vector Machines", "doi": null, "abstractUrl": "/proceedings-article/iita/2008/3497b567/12OmNrHjqNO", "parentPublication": { "id": "iita/2008/3497/2", "title": "2008 Second International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prni/2012/4765/0/4765a021", "title": "ICA Component Selection Based on Sparse Activelet Reconstruction for fMRI Analysis in Refractory Focal Epilepsy", "doi": null, "abstractUrl": "/proceedings-article/prni/2012/4765a021/12OmNvlg8o4", "parentPublication": { "id": "proceedings/prni/2012/4765/0", "title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prni/2012/4765/0/4765a017", "title": "Towards Identification and Characterisation of Selective fMRI Feature Sets Using Independent Component Analysis", "doi": null, "abstractUrl": "/proceedings-article/prni/2012/4765a017/12OmNwE9OC1", "parentPublication": { "id": "proceedings/prni/2012/4765/0", "title": 
"Pattern Recognition in NeuroImaging, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prni/2012/4765/0/4765a061", "title": "Decoding Spontaneous Brain Activity from fMRI Using Gaussian Processes: Tracking Brain Reactivation", "doi": null, "abstractUrl": "/proceedings-article/prni/2012/4765a061/12OmNwE9ODq", "parentPublication": { "id": "proceedings/prni/2012/4765/0", "title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995651", "title": "Generalized group sparse classifiers with application in fMRI brain decoding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995651/12OmNwcUk3T", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prni/2011/4399/0/4399a065", "title": "Modeling Spatiotemporal Structure in fMRI Brain Decoding Using Generalized Sparse Classifiers", "doi": null, "abstractUrl": "/proceedings-article/prni/2011/4399a065/12OmNwdtwiE", "parentPublication": { "id": "proceedings/prni/2011/4399/0", "title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prni/2012/4765/0/4765a033", "title": "Multivariate fMRI Analysis Using Optimally-discriminative Voxel-based Analysis", "doi": null, "abstractUrl": "/proceedings-article/prni/2012/4765a033/12OmNzXFoFi", "parentPublication": { "id": "proceedings/prni/2012/4765/0", "title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzRZpZL", "title": "2008 IEEE International Symposium on Defect and Fault Tolerance of VLSI Systems", "acronym": "dft", "groupId": "1000190", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNwt5sja", "doi": "10.1109/DFT.2008.41", "title": "Fault Detection of Bloom Filters for Defect Maps", "normalizedTitle": "Fault Detection of Bloom Filters for Defect Maps", "abstract": "Bloom filters can be used as a data structure for defect maps in nanoscale memory. Unlike most other applications of Bloom filters, both false positive and false negative induced by a fault cause a fatal error in the memory system. In this paper, we present a technique for detecting faults in Bloom filters for defect maps. Spare hashing units and a simple coding technique for bit vectors are employed to detect faults during normal operation. Parallel write/read is also proposed to detect faults with high probability even without spare hashing units.", "abstracts": [ { "abstractType": "Regular", "content": "Bloom filters can be used as a data structure for defect maps in nanoscale memory. Unlike most other applications of Bloom filters, both false positive and false negative induced by a fault cause a fatal error in the memory system. In this paper, we present a technique for detecting faults in Bloom filters for defect maps. Spare hashing units and a simple coding technique for bit vectors are employed to detect faults during normal operation. Parallel write/read is also proposed to detect faults with high probability even without spare hashing units.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Bloom filters can be used as a data structure for defect maps in nanoscale memory. Unlike most other applications of Bloom filters, both false positive and false negative induced by a fault cause a fatal error in the memory system. 
In this paper, we present a technique for detecting faults in Bloom filters for defect maps. Spare hashing units and a simple coding technique for bit vectors are employed to detect faults during normal operation. Parallel write/read is also proposed to detect faults with high probability even without spare hashing units.", "fno": "3365a229", "keywords": [ "Fault Detection", "Bloom Filters", "Defect Maps" ], "authors": [ { "affiliation": null, "fullName": "Jae-Young Choi", "givenName": "Jae-Young", "surname": "Choi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yoon-Hwa Choi", "givenName": "Yoon-Hwa", "surname": "Choi", "__typename": "ArticleAuthorType" } ], "idPrefix": "dft", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-10-01T00:00:00", "pubType": "proceedings", "pages": "229-235", "year": "2008", "issn": "1550-5774", "isbn": "978-0-7695-3365-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3365a220", "articleId": "12OmNBqMDvP", "__typename": "AdjacentArticleType" }, "next": { "fno": "3365a236", "articleId": "12OmNs59JSB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acsac/2008/3447/0/3447a013", "title": "Practical Applications of Bloom Filters to the NIST RDS and Hard Drive Triage", "doi": null, "abstractUrl": "/proceedings-article/acsac/2008/3447a013/12OmNBp52HS", "parentPublication": { "id": "proceedings/acsac/2008/3447/0", "title": "2008 Annual Computer Security Applications Conference (ACSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2011/4337/0/4337a316", "title": "Evaluation of the Structured Bloom Filters Based on Similarity", "doi": null, "abstractUrl": "/proceedings-article/aina/2011/4337a316/12OmNCd2roo", "parentPublication": { "id": 
"proceedings/aina/2011/4337/0", "title": "2011 IEEE International Conference on Advanced Information Networking and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2009/3610/1/3610a485", "title": "Memory Efficient Parallel Bloom Filters for String Matching", "doi": null, "abstractUrl": "/proceedings-article/nswctc/2009/3610a485/12OmNqGRGaq", "parentPublication": { "id": "proceedings/nswctc/2009/3610/1", "title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chinagrid/2009/3818/0/3818a095", "title": "Distributed Metadata Management Based on Hierarchical Bloom Filters in Data Grid", "doi": null, "abstractUrl": "/proceedings-article/chinagrid/2009/3818a095/12OmNxAlA5S", "parentPublication": { "id": "proceedings/chinagrid/2009/3818/0", "title": "2009 Fourth ChinaGrid Annual Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfn/2010/3940/0/3940a404", "title": "A load-balancing scheme based on Bloom Filters", "doi": null, "abstractUrl": "/proceedings-article/icfn/2010/3940a404/12OmNyUWR16", "parentPublication": { "id": "proceedings/icfn/2010/3940/0", "title": "Future Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnp/2008/2506/0/04697026", "title": "Rank-indexed hashing: A compact construction of Bloom filters and variants", "doi": null, "abstractUrl": "/proceedings-article/icnp/2008/04697026/12OmNzC5TsG", "parentPublication": { "id": "proceedings/icnp/2008/2506/0", "title": "2008 IEEE International Conference on Network Protocols", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccad/2006/3891/0/04110261", "title": "On the Use of Bloom 
Filters for Defect Maps in Nanocomputing", "doi": null, "abstractUrl": "/proceedings-article/iccad/2006/04110261/12OmNzd7bgD", "parentPublication": { "id": "proceedings/iccad/2006/3891/0", "title": "Computer-Aided Design, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/nt/2002/05/01041067", "title": "Compressed Bloom filters", "doi": null, "abstractUrl": "/journal/nt/2002/05/01041067/13rRUEgs2qv", "parentPublication": { "id": "trans/nt", "title": "IEEE/ACM Transactions on Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2010/01/ttd2010010020", "title": "Using Parallel Bloom Filters for Multiattribute Representation on Network Services", "doi": null, "abstractUrl": "/journal/td/2010/01/ttd2010010020/13rRUy0qnFX", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2010/01/ttk2010010120", "title": "The Dynamic Bloom Filters", "doi": null, "abstractUrl": "/journal/tk/2010/01/ttk2010010120/13rRUyeTVil", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBrDqEu", "title": "2005 International Symposium on Collaborative Technologies and Systems", "acronym": "iscst", "groupId": "1001747", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNwwMeWb", "doi": "10.1109/ISCST.2005.1553317", "title": "Visualization of interactions in an online collaboration environment", "normalizedTitle": "Visualization of interactions in an online collaboration environment", "abstract": "The use of computer-mediated communication and collaboration systems has become increasingly widespread over the past decade. These systems have also become the focus of research into online interaction and communication. Such research is facilitated by the use of information visualization, and a sampling of different types of visualization applied to computer-mediated communication and collaboration systems is given in this paper. We then present our own work on visualizing information from the LiveNet collaboration system. We have developed three distinct forms of visualizations: workspace network maps, workspace maps, and discussion maps, which are displayed using a modified form of an animated spring algorithm. We present examples of visualizations from the LiveNet system that allow certain aspects and patterns of the interaction carried out through it to be perceived.", "abstracts": [ { "abstractType": "Regular", "content": "The use of computer-mediated communication and collaboration systems has become increasingly widespread over the past decade. These systems have also become the focus of research into online interaction and communication. Such research is facilitated by the use of information visualization, and a sampling of different types of visualization applied to computer-mediated communication and collaboration systems is given in this paper. We then present our own work on visualizing information from the LiveNet collaboration system. 
We have developed three distinct forms of visualizations: workspace network maps, workspace maps, and discussion maps, which are displayed using a modified form of an animated spring algorithm. We present examples of visualizations from the LiveNet system that allow certain aspects and patterns of the interaction carried out through it to be perceived.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The use of computer-mediated communication and collaboration systems has become increasingly widespread over the past decade. These systems have also become the focus of research into online interaction and communication. Such research is facilitated by the use of information visualization, and a sampling of different types of visualization applied to computer-mediated communication and collaboration systems is given in this paper. We then present our own work on visualizing information from the LiveNet collaboration system. We have developed three distinct forms of visualizations: workspace network maps, workspace maps, and discussion maps, which are displayed using a modified form of an animated spring algorithm. We present examples of visualizations from the LiveNet system that allow certain aspects and patterns of the interaction carried out through it to be perceived.", "fno": "01553317", "keywords": [ "Animated Spring Algorithm", "Online Interaction Visualization", "Online Collaboration Environment", "Computer Mediated Communication", "Computer Mediated Collaboration System", "Information Visualization", "Live Net Collaboration System", "Workspace Network Maps", "Workspace Maps", "Discussion Maps" ], "authors": [ { "affiliation": "Fac. of Sci.&Technol., Univ. of Macau, China", "fullName": "R.P. 
Biuk-Aghai", "givenName": "R.P.", "surname": "Biuk-Aghai", "__typename": "ArticleAuthorType" } ], "idPrefix": "iscst", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-05-01T00:00:00", "pubType": "proceedings", "pages": "228-235", "year": "2005", "issn": null, "isbn": "0-7695-2387-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01553316", "articleId": "12OmNwDACqC", "__typename": "AdjacentArticleType" }, "next": { "fno": "01553318", "articleId": "12OmNwErpK8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icita/2005/2316/2/231620142", "title": "Employing Wikis for Online Collaboration in the E-Learning Environment: Case Study", "doi": null, "abstractUrl": "/proceedings-article/icita/2005/231620142/12OmNC4eSro", "parentPublication": { "id": "proceedings/icita/2005/2316/2", "title": "Information Technology and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/passat-socialcom/2011/1931/0/06113214", "title": "A Study of Social Interactions in Online Health Communities", "doi": null, "abstractUrl": "/proceedings-article/passat-socialcom/2011/06113214/12OmNCesrcd", "parentPublication": { "id": "proceedings/passat-socialcom/2011/1931/0", "title": "2011 IEEE Third Int'l Conference on Privacy, Security, Risk and Trust (PASSAT) / 2011 IEEE Third Int'l Conference on Social Computing (SocialCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipcc/2005/9027/0/01494156", "title": "Making connections: an intercultural virtual team project in professional communication", "doi": null, "abstractUrl": "/proceedings-article/ipcc/2005/01494156/12OmNqFrGEE", "parentPublication": { "id": "proceedings/ipcc/2005/9027/0", 
"title": "2005 IEEE International Professional Communication Conference (IPCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/colcom/2005/0030/0/01651233", "title": "Supporting workspace-mediated interaction in collaborative presentations with CoPowerPoint", "doi": null, "abstractUrl": "/proceedings-article/colcom/2005/01651233/12OmNqHItMH", "parentPublication": { "id": "proceedings/colcom/2005/0030/0", "title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icebe/2013/5111/0/5111a484", "title": "Integrating PowerMeeting into Blackboard Learning Environment: Synchronous Collaboration Support in Asynchronous Collaboration Context", "doi": null, "abstractUrl": "/proceedings-article/icebe/2013/5111a484/12OmNxvwp2z", "parentPublication": { "id": "proceedings/icebe/2013/5111/0", "title": "2013 IEEE 10th International Conference on e-Business Engineering (ICEBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/criwg/2000/0828/0/08280052", "title": "Supporting Handheld Collaboration through COMAL", "doi": null, "abstractUrl": "/proceedings-article/criwg/2000/08280052/12OmNyQ7G8s", "parentPublication": { "id": "proceedings/criwg/2000/0828/0", "title": "Groupware, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770693", "title": "A New Visualization Approach for Supporting Knowledge Management and Collaboration in E-Learning", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770693/12OmNzEVRYx", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. 
IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2006/0256/0/04117216", "title": "The Effect of Individual and Group Characteristics on Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/fie/2006/04117216/12OmNzd7bD8", "parentPublication": { "id": "proceedings/fie/2006/0256/0", "title": "Proceedings. Frontiers in Education. 36th Annual Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061065", "title": "Lark: Coordinating Co-located Collaboration with Information Visualization", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061065/13rRUynHuj4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2007/2755/0/04076455", "title": "A Framework for Workgroup Collaboration in a Vir tual Environment: Theoretical Synthesis and Empir ical Exploration", "doi": null, "abstractUrl": "/proceedings-article/hicss/2007/04076455/17D45VsBTZb", "parentPublication": { "id": "proceedings/hicss/2007/2755/0", "title": "2007 40th Annual Hawaii International Conference on System Sciences (HICSS'07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqFJhOG", "title": "2008 15th Working Conference on Reverse Engineering", "acronym": "wcre", "groupId": "1000635", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNyRPgQL", "doi": "10.1109/WCRE.2008.45", "title": "Consistent Layout for Thematic Software Maps", "normalizedTitle": "Consistent Layout for Thematic Software Maps", "abstract": "Software visualizations can provide a concise overview of a complex software system.Unfortunately, since software has no physical shape, there is no ``natural'' mapping of software to a two-dimensional space. As a consequence most visualizations tend to use a layout in which position and distance have no meaning, and consequently layout typical diverges from one visualization to another.We propose a consistent layout for software maps in which the position of a software artifact reflects its vocabulary, and distance corresponds to similarity of vocabulary.We use Latent Semantic Indexing (LSI) to map software artifacts to a vector space, and then use Multidimensional Scaling (MDS) to map this vector space down to two dimensions.The resulting consistent layout allows us to develop a variety of thematic software maps that express very different aspects of software while making it easy to compare them.The approach is especially suitable for comparing views of evolving software, since the vocabulary of software artifacts tends to be stable over time.", "abstracts": [ { "abstractType": "Regular", "content": "Software visualizations can provide a concise overview of a complex software system.Unfortunately, since software has no physical shape, there is no ``natural'' mapping of software to a two-dimensional space. 
As a consequence most visualizations tend to use a layout in which position and distance have no meaning, and consequently layout typical diverges from one visualization to another.We propose a consistent layout for software maps in which the position of a software artifact reflects its vocabulary, and distance corresponds to similarity of vocabulary.We use Latent Semantic Indexing (LSI) to map software artifacts to a vector space, and then use Multidimensional Scaling (MDS) to map this vector space down to two dimensions.The resulting consistent layout allows us to develop a variety of thematic software maps that express very different aspects of software while making it easy to compare them.The approach is especially suitable for comparing views of evolving software, since the vocabulary of software artifacts tends to be stable over time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Software visualizations can provide a concise overview of a complex software system.Unfortunately, since software has no physical shape, there is no ``natural'' mapping of software to a two-dimensional space. 
As a consequence most visualizations tend to use a layout in which position and distance have no meaning, and consequently layout typical diverges from one visualization to another.We propose a consistent layout for software maps in which the position of a software artifact reflects its vocabulary, and distance corresponds to similarity of vocabulary.We use Latent Semantic Indexing (LSI) to map software artifacts to a vector space, and then use Multidimensional Scaling (MDS) to map this vector space down to two dimensions.The resulting consistent layout allows us to develop a variety of thematic software maps that express very different aspects of software while making it easy to compare them.The approach is especially suitable for comparing views of evolving software, since the vocabulary of software artifacts tends to be stable over time.", "fno": "3429a209", "keywords": [ "Software Visualization Program Comprehension" ], "authors": [ { "affiliation": null, "fullName": "Adrian Kuhn", "givenName": "Adrian", "surname": "Kuhn", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Peter Loretan", "givenName": "Peter", "surname": "Loretan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Oscar Nierstrasz", "givenName": "Oscar", "surname": "Nierstrasz", "__typename": "ArticleAuthorType" } ], "idPrefix": "wcre", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-10-01T00:00:00", "pubType": "proceedings", "pages": "209-218", "year": "2008", "issn": "1095-1350", "isbn": "978-0-7695-3429-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3429a202", "articleId": "12OmNCzKlLk", "__typename": "AdjacentArticleType" }, "next": { "fno": "3429a219", "articleId": "12OmNyRPgRL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/csmr/2012/4666/0/4666a441", "title": "Identifying Knowledge Divergence by Vocabulary Monitoring in Software Projects", "doi": null, "abstractUrl": "/proceedings-article/csmr/2012/4666a441/12OmNA14Ag2", "parentPublication": { "id": "proceedings/csmr/2012/4666/0", "title": "2012 16th European Conference on Software Maintenance and Reengineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apsec/2009/3909/0/3909a373", "title": "Code Clone Graph Metrics for Detecting Diffused Code Clones", "doi": null, "abstractUrl": "/proceedings-article/apsec/2009/3909a373/12OmNAq3hHd", "parentPublication": { "id": "proceedings/apsec/2009/3909/0", "title": "2009 16th Asia-Pacific Software Engineering Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpc/2011/4398/0/4398a254", "title": "A Systematic Analysis of Software Architecture Visualization Techniques", "doi": null, "abstractUrl": "/proceedings-article/icpc/2011/4398a254/12OmNAu1Fk5", "parentPublication": { "id": "proceedings/icpc/2011/4398/0", "title": "International Conference on Program Comprehension", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2000/0743/0/07430198", "title": "Virtual but Visible Software", "doi": null, "abstractUrl": "/proceedings-article/iv/2000/07430198/12OmNvT2p7B", "parentPublication": { "id": "proceedings/iv/2000/0743/0", "title": "2000 IEEE Conference on Information Visualization. 
An International Conference on Computer Visualization and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esem/2011/4604/0/4604a127", "title": "Exploring Software Measures to Assess Program Comprehension", "doi": null, "abstractUrl": "/proceedings-article/esem/2011/4604a127/12OmNxXCGIV", "parentPublication": { "id": "proceedings/esem/2011/4604/0", "title": "2011 International Symposium on Empirical Software Engineering and Measurement", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156379", "title": "Clutter-aware label layout", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156379/12OmNyY4rqE", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2006/04/s4084", "title": "Project Visualization for Software", "doi": null, "abstractUrl": "/magazine/so/2006/04/s4084/13rRUxZRbma", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2002/05/e0463", "title": "The Effectiveness of Control Structure Diagrams in Source Code Comprehension Activities", "doi": null, "abstractUrl": "/journal/ts/2002/05/e0463/13rRUy0HYLv", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/testvis/2022/9627/0/962700a015", "title": "Test Intelligence: How Modern Analyses and Visualizations in Teamscale Support Software Testing", "doi": null, "abstractUrl": "/proceedings-article/testvis/2022/962700a015/1JgrV7jaEHS", "parentPublication": { "id": "proceedings/testvis/2022/9627/0", "title": "2022 First International 
Workshop on Visualization in Testing of Hardware, Software, and Manufacturing (TestVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10029921", "title": "Semi-Automatic Layout Adaptation for Responsive Multiple-View Visualization Design", "doi": null, "abstractUrl": "/journal/tg/5555/01/10029921/1KmyX4gJuMg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwbcJ4L", "title": "2008 9th International Conference for Young Computer Scientists", "acronym": "icycs", "groupId": "1002545", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNyfdOOB", "doi": "10.1109/ICYCS.2008.258", "title": "Optimization of One-Dimensional Coded Modulations Using Chaotic Maps", "normalizedTitle": "Optimization of One-Dimensional Coded Modulations Using Chaotic Maps", "abstract": "In order to improve the bit error rate performance of one-dimensional coded modulations using chaotic maps, an optimization process is performed in this paper. Based on the designing methods of conventional trellis coded modulation the minimum free Euclidean distance between signal sequences of chaos-based coded modulations is maximized. Computer search method is then used to find the coded modulations with better minimum distance properties. Finally, bit error rate simulations are given to confirm our optimization results.", "abstracts": [ { "abstractType": "Regular", "content": "In order to improve the bit error rate performance of one-dimensional coded modulations using chaotic maps, an optimization process is performed in this paper. Based on the designing methods of conventional trellis coded modulation the minimum free Euclidean distance between signal sequences of chaos-based coded modulations is maximized. Computer search method is then used to find the coded modulations with better minimum distance properties. Finally, bit error rate simulations are given to confirm our optimization results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to improve the bit error rate performance of one-dimensional coded modulations using chaotic maps, an optimization process is performed in this paper. 
Based on the designing methods of conventional trellis coded modulation the minimum free Euclidean distance between signal sequences of chaos-based coded modulations is maximized. Computer search method is then used to find the coded modulations with better minimum distance properties. Finally, bit error rate simulations are given to confirm our optimization results.", "fno": "3398d051", "keywords": [ "Trellis Coded Modulation", "Minimum Distance", "Optimization", "Chaotic Maps" ], "authors": [ { "affiliation": null, "fullName": "Heng Song", "givenName": "Heng", "surname": "Song", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jingbo Guo", "givenName": "Jingbo", "surname": "Guo", "__typename": "ArticleAuthorType" } ], "idPrefix": "icycs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-11-01T00:00:00", "pubType": "proceedings", "pages": "3051-3056", "year": "2008", "issn": null, "isbn": "978-0-7695-3398-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3398d045", "articleId": "12OmNzd7bW5", "__typename": "AdjacentArticleType" }, "next": { "fno": "3398d057", "articleId": "12OmNxwnciB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aict/2008/3162/0/3162a355", "title": "Cooperative Diversity with Orthogonal Space Time Coded MSK", "doi": null, "abstractUrl": "/proceedings-article/aict/2008/3162a355/12OmNAolGPi", "parentPublication": { "id": "proceedings/aict/2008/3162/0", "title": "Advanced International Conference on Telecommunications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/lcn/2010/8387/0/p192almasalha", "title": "Scalable encryption of variable length coded video bit streams", "doi": null, "abstractUrl": "/proceedings-article/lcn/2010/p192almasalha/12OmNC3Xhs4", 
"parentPublication": { "id": "proceedings/lcn/2010/8387/0", "title": "IEEE Local Computer Network Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/1/81831592", "title": "Compression of synthetic aperture radar phase history data using trellis coded quantization techniques", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81831592/12OmNrYlmFD", "parentPublication": { "id": "proceedings/icip/1997/8183/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aict/2009/3611/0/3611a152", "title": "Chaotic Digital Encoding for 2D Trellis-Coded Modulation", "doi": null, "abstractUrl": "/proceedings-article/aict/2009/3611a152/12OmNx9WSYQ", "parentPublication": { "id": "proceedings/aict/2009/3611/0", "title": "Advanced International Conference on Telecommunications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieec/2009/3686/0/3686a085", "title": "Color Image Cryptography Using Multiple One-Dimensional Chaotic Maps and OCML", "doi": null, "abstractUrl": "/proceedings-article/ieec/2009/3686a085/12OmNxFJXzI", "parentPublication": { "id": "proceedings/ieec/2009/3686/0", "title": "2009 International Symposium on Information Engineering and Electronic Commerce (IEEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2005/9331/0/01521531", "title": "Optimal packetization of VLC and convolution coded Markov sequences", "doi": null, "abstractUrl": "/proceedings-article/icme/2005/01521531/12OmNxR5UIA", "parentPublication": { "id": "proceedings/icme/2005/9331/0", "title": "2005 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2008/3243/0/3243a321", "title": "Matlab 
Toolbox and GUI for Analyzing One-Dimensional Chaotic Maps", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2008/3243a321/12OmNxR5UQg", "parentPublication": { "id": "proceedings/iccsa/2008/3243/0", "title": "2008 International Conference on Computational Sciences and Its Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/1995/7075/0/70750364", "title": "Trellis-coded M-ary orthogonal modulation", "doi": null, "abstractUrl": "/proceedings-article/iscc/1995/70750364/12OmNyaGeMj", "parentPublication": { "id": "proceedings/iscc/1995/7075/0", "title": "Proceedings IEEE Symposium on Computers and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2017/1629/0/08024617", "title": "Software-Defined Radio proof-of-concept for chaos-based coded modulations", "doi": null, "abstractUrl": "/proceedings-article/iscc/2017/08024617/12OmNz5JBWG", "parentPublication": { "id": "proceedings/iscc/2017/1629/0", "title": "2017 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031d684", "title": "Efficient Rate-Adaptive Modulation for LDPC-Coded OFDM System", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031d684/12OmNzvQHXk", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAWH9tH", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "acronym": "pacificvis", "groupId": "1001657", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNyvY9ru", "doi": "10.1109/PACIFICVIS.2011.5742375", "title": "Edge maps: Representing flow with bounded error", "normalizedTitle": "Edge maps: Representing flow with bounded error", "abstract": "Robust analysis of vector fields has been established as an important tool for deriving insights from the complex systems these fields model. Many analysis techniques rely on computing streamlines, a task often hampered by numerical instabilities. Approaches that ignore the resulting errors can lead to inconsistencies that may produce unreliable visualizations and ultimately prevent in-depth analysis. We propose a new representation for vector fields on surfaces that replaces numerical integration through triangles with linear maps defined on its boundary. This representation, called edge maps, is equivalent to computing all possible streamlines at a user defined error threshold. In spite of this error, all the streamlines computed using edge maps will be pairwise disjoint. Furthermore, our representation stores the error explicitly, and thus can be used to produce more informative visualizations. Given a piecewise-linear interpolated vector field, a recent result [15] shows that there are only 23 possible map classes for a triangle, permitting a concise description of flow behaviors. This work describes the details of computing edge maps, provides techniques to quantify and refine edge map error, and gives qualitative and visual comparisons to more traditional techniques.", "abstracts": [ { "abstractType": "Regular", "content": "Robust analysis of vector fields has been established as an important tool for deriving insights from the complex systems these fields model. 
Many analysis techniques rely on computing streamlines, a task often hampered by numerical instabilities. Approaches that ignore the resulting errors can lead to inconsistencies that may produce unreliable visualizations and ultimately prevent in-depth analysis. We propose a new representation for vector fields on surfaces that replaces numerical integration through triangles with linear maps defined on its boundary. This representation, called edge maps, is equivalent to computing all possible streamlines at a user defined error threshold. In spite of this error, all the streamlines computed using edge maps will be pairwise disjoint. Furthermore, our representation stores the error explicitly, and thus can be used to produce more informative visualizations. Given a piecewise-linear interpolated vector field, a recent result [15] shows that there are only 23 possible map classes for a triangle, permitting a concise description of flow behaviors. This work describes the details of computing edge maps, provides techniques to quantify and refine edge map error, and gives qualitative and visual comparisons to more traditional techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Robust analysis of vector fields has been established as an important tool for deriving insights from the complex systems these fields model. Many analysis techniques rely on computing streamlines, a task often hampered by numerical instabilities. Approaches that ignore the resulting errors can lead to inconsistencies that may produce unreliable visualizations and ultimately prevent in-depth analysis. We propose a new representation for vector fields on surfaces that replaces numerical integration through triangles with linear maps defined on its boundary. This representation, called edge maps, is equivalent to computing all possible streamlines at a user defined error threshold. 
In spite of this error, all the streamlines computed using edge maps will be pairwise disjoint. Furthermore, our representation stores the error explicitly, and thus can be used to produce more informative visualizations. Given a piecewise-linear interpolated vector field, a recent result [15] shows that there are only 23 possible map classes for a triangle, permitting a concise description of flow behaviors. This work describes the details of computing edge maps, provides techniques to quantify and refine edge map error, and gives qualitative and visual comparisons to more traditional techniques.", "fno": "05742375", "keywords": [ "Data Visualisation", "Edge Maps", "Flow Represention", "Bounded Error", "Vector Fields", "Numerical Instabilities", "Data Visualizations", "In Depth Analysis", "Vector Field Representation", "User Defined Error Threshold", "Piecewise Linear Interpolated Vector Field", "Vectors", "Image Edge Detection", "Merging", "Interpolation", "Piecewise Linear Approximation", "Skeleton", "Vector Fields", "Error Quantification", "Edge Maps" ], "authors": [ { "affiliation": "SCI Institute, Univ. of Utah, USA", "fullName": "Harsh Bhatia", "givenName": "Harsh", "surname": "Bhatia", "__typename": "ArticleAuthorType" }, { "affiliation": "SCI Institute, Univ. of Utah, USA", "fullName": "Shreeraj Jadhav", "givenName": "Shreeraj", "surname": "Jadhav", "__typename": "ArticleAuthorType" }, { "affiliation": "Lawrence Livermore National Lab, USA", "fullName": "Peer-Timo Bremer", "givenName": "Peer-Timo", "surname": "Bremer", "__typename": "ArticleAuthorType" }, { "affiliation": "SCI Institute, Univ. of Utah, USA", "fullName": "Guoning Chen", "givenName": "Guoning", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "SCI Institute, Univ. of Utah, USA", "fullName": "Joshua A. 
Levine", "givenName": "Joshua A.", "surname": "Levine", "__typename": "ArticleAuthorType" }, { "affiliation": "Universidade de São Paulo, Brazil", "fullName": "Luis Gustavo Nonato", "givenName": "Luis Gustavo", "surname": "Nonato", "__typename": "ArticleAuthorType" }, { "affiliation": "SCI Institute, Univ. of Utah, USA", "fullName": "Valerio Pascucci", "givenName": "Valerio", "surname": "Pascucci", "__typename": "ArticleAuthorType" } ], "idPrefix": "pacificvis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "75-82", "year": "2011", "issn": "2165-8765", "isbn": "978-1-61284-935-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05742374", "articleId": "12OmNwI8cgb", "__typename": "AdjacentArticleType" }, "next": { "fno": "05742376", "articleId": "12OmNqyDjoV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iihmsp/2006/2745/0/04041690", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iihmsp/2006/04041690/12OmNCfjeyV", "parentPublication": { "id": "proceedings/iihmsp/2006/2745/0", "title": "2006 International Conference on Intelligent Information Hiding and Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2006/2745/0/27450159", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2006/27450159/12OmNqGA55h", "parentPublication": { "id": "proceedings/iih-msp/2006/2745/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/3/73103113", 
"title": "Edge behavior of error diffusion", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73103113/12OmNqzcvLk", "parentPublication": { "id": "proceedings/icip/1995/7310/3", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a936", "title": "Learning Informative Edge Maps for Indoor Scene Layout Prediction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a936/12OmNvSKNTq", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118a248", "title": "How to Evaluate Foreground Maps", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a248/12OmNzVGcOM", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1998/8821/2/882120550", "title": "Wavelet-based multiresolution edge detection utilizing gray level edge maps", "doi": null, "abstractUrl": "/proceedings-article/icip/1998/882120550/12OmNzxgHCH", "parentPublication": { "id": "proceedings/icip/1998/8821/3", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1448", "title": "Similarity-Guided Streamline Placement with Error Evaluation", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1448/13rRUEgs2BN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/09/06051431", "title": "Flow Visualization 
with Quantified Spatial and Temporal Errors Using Edge Maps", "doi": null, "abstractUrl": "/journal/tg/2012/09/06051431/13rRUx0gev6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875962", "title": "Escape Maps", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875962/13rRUxYINfc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1993/12/i1241", "title": "The Integration of Image Segmentation Maps using Region and Edge Information", "doi": null, "abstractUrl": "/journal/tp/1993/12/i1241/13rRUy3xY8X", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxwWoqU", "title": "Image Processing, International Conference on", "acronym": "icip", "groupId": "1000349", "volume": "2", "displayVolume": "2", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNzxgHCH", "doi": "10.1109/ICIP.1998.723506", "title": "Wavelet-based multiresolution edge detection utilizing gray level edge maps", "normalizedTitle": "Wavelet-based multiresolution edge detection utilizing gray level edge maps", "abstract": "Edge detection is an important low-level vision problem. Most edge detection methods operate on an image at a single resolution and output a binary edge map. Edges within an image, however, generally occur at various resolutions, or scales, and represent transitions of different degrees, or gradient levels. Thus, single resolution edge detection methods that output binary edge maps do not always yield satisfactory results. This paper develops a multiresolution edge detection method that utilizes a multirate wavelet decomposition to generate a series of images with progressively lower edge resolution. Edges are then recursively extracted to form series of edge maps where the output is not restricted to be binary and is set to reflect the gradient level at each edge point. The series of edge maps is restricted to form a stacking edge map pyramid. In this formulation, the base (lowest level) edge map contains edges at all scales while edge pruning, based on edge scale, is performed at subsequent levels. This approach is shown to have advantages over previously defined multiresolution edge detection methods. Results are presented using natural seen images. These results are motivated by tactile imaging, an important problem in making visual information accessible to blind individuals", "abstracts": [ { "abstractType": "Regular", "content": "Edge detection is an important low-level vision problem. 
Most edge detection methods operate on an image at a single resolution and output a binary edge map. Edges within an image, however, generally occur at various resolutions, or scales, and represent transitions of different degrees, or gradient levels. Thus, single resolution edge detection methods that output binary edge maps do not always yield satisfactory results. This paper develops a multiresolution edge detection method that utilizes a multirate wavelet decomposition to generate a series of images with progressively lower edge resolution. Edges are then recursively extracted to form series of edge maps where the output is not restricted to be binary and is set to reflect the gradient level at each edge point. The series of edge maps is restricted to form a stacking edge map pyramid. In this formulation, the base (lowest level) edge map contains edges at all scales while edge pruning, based on edge scale, is performed at subsequent levels. This approach is shown to have advantages over previously defined multiresolution edge detection methods. Results are presented using natural seen images. These results are motivated by tactile imaging, an important problem in making visual information accessible to blind individuals", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Edge detection is an important low-level vision problem. Most edge detection methods operate on an image at a single resolution and output a binary edge map. Edges within an image, however, generally occur at various resolutions, or scales, and represent transitions of different degrees, or gradient levels. Thus, single resolution edge detection methods that output binary edge maps do not always yield satisfactory results. This paper develops a multiresolution edge detection method that utilizes a multirate wavelet decomposition to generate a series of images with progressively lower edge resolution. 
Edges are then recursively extracted to form series of edge maps where the output is not restricted to be binary and is set to reflect the gradient level at each edge point. The series of edge maps is restricted to form a stacking edge map pyramid. In this formulation, the base (lowest level) edge map contains edges at all scales while edge pruning, based on edge scale, is performed at subsequent levels. This approach is shown to have advantages over previously defined multiresolution edge detection methods. Results are presented using natural seen images. These results are motivated by tactile imaging, an important problem in making visual information accessible to blind individuals", "fno": "882120550", "keywords": [], "authors": [ { "affiliation": "Dept. of Electr. & Comput. Eng., Delaware Univ., Newark, DE", "fullName": "J.I. Siddique", "givenName": "J.I.", "surname": "Siddique", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electr. & Comput. Eng., Delaware Univ., Newark, DE", "fullName": "K.E. 
Barner", "givenName": "K.E.", "surname": "Barner", "__typename": "ArticleAuthorType" } ], "idPrefix": "icip", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-10-01T00:00:00", "pubType": "proceedings", "pages": "550", "year": "1998", "issn": null, "isbn": "0-8186-8821-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "882120545", "articleId": "12OmNC943Ps", "__typename": "AdjacentArticleType" }, "next": { "fno": "882120555", "articleId": "12OmNApLGNk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icc/2009/3538/0/3538a048", "title": "2-Dimensional Geometric Transforms for Edge Detection", "doi": null, "abstractUrl": "/proceedings-article/icc/2009/3538a048/12OmNApcunN", "parentPublication": { "id": "proceedings/icc/2009/3538/0", "title": "Computing, Engineering and Information, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iihmsp/2006/2745/0/04041690", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iihmsp/2006/04041690/12OmNCfjeyV", "parentPublication": { "id": "proceedings/iihmsp/2006/2745/0", "title": "2006 International Conference on Intelligent Information Hiding and Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2006/2745/0/27450159", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2006/27450159/12OmNqGA55h", "parentPublication": { "id": "proceedings/iih-msp/2006/2745/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/iccv/2015/8391/0/8391a936", "title": "Learning Informative Edge Maps for Indoor Scene Layout Prediction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a936/12OmNvSKNTq", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1988/0878/0/00028336", "title": "Multiresolution relaxation: experiments and evaluations", "doi": null, "abstractUrl": "/proceedings-article/icpr/1988/00028336/12OmNxE2n0P", "parentPublication": { "id": "proceedings/icpr/1988/0878/0", "title": "9th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1990/2062/1/00118095", "title": "Spatio-temporal edge focusing", "doi": null, "abstractUrl": "/proceedings-article/icpr/1990/00118095/12OmNxYtu8m", "parentPublication": { "id": "proceedings/icpr/1990/2062/1", "title": "Proceedings 10th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/1/73100041", "title": "Multiresolution sequential edge linking", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73100041/12OmNyKrH36", "parentPublication": { "id": "proceedings/icip/1995/7310/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460720", "title": "Edge detection for facial images under noisy conditions", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460720/12OmNyaoDwg", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icmlc/2003/7865/5/01260010", "title": "A motion parameter estimation algorithm based on the multiresolution edge detection", "doi": null, "abstractUrl": "/proceedings-article/icmlc/2003/01260010/12OmNzYwc4m", "parentPublication": { "id": "proceedings/icmlc/2003/7865/1", "title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1992/03/i0384", "title": "Kernel Designs for Efficient Multiresolution Edge Detection and Orientation Estimation", "doi": null, "abstractUrl": "/journal/tp/1992/03/i0384/13rRUxNW206", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hJrHq07uw0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "acronym": "big-data", "groupId": "1802964", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hJsA3rEATe", "doi": "10.1109/BigData47090.2019.9006055", "title": "Fast Self-Organizing Maps Training", "normalizedTitle": "Fast Self-Organizing Maps Training", "abstract": "Self-organizing maps are an unsupervised machine learning technique that offers interpretable results by identifying topological properties in high-dimensional datasets and projecting them on a 2-dimensional grid. An important problem of self-organizing maps is the computational expensiveness of their training phase. In this paper, we propose a fast approach to train self-organizing maps. The approach consists of 2 steps. First, a small map identifies the most relevant areas from the entire high-dimensional input space. Then a larger map (initialized from the small one) is fine-tuned to further explore the local areas identified in the first step. The resulting map has performance (measured in terms of accuracy and quantization error) on par with self-organizing maps trained with the standard approach, but with a significantly reduced training time.", "abstracts": [ { "abstractType": "Regular", "content": "Self-organizing maps are an unsupervised machine learning technique that offers interpretable results by identifying topological properties in high-dimensional datasets and projecting them on a 2-dimensional grid. An important problem of self-organizing maps is the computational expensiveness of their training phase. In this paper, we propose a fast approach to train self-organizing maps. The approach consists of 2 steps. First, a small map identifies the most relevant areas from the entire high-dimensional input space. 
Then a larger map (initialized from the small one) is fine-tuned to further explore the local areas identified in the first step. The resulting map has performance (measured in terms of accuracy and quantization error) on par with self-organizing maps trained with the standard approach, but with a significantly reduced training time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Self-organizing maps are an unsupervised machine learning technique that offers interpretable results by identifying topological properties in high-dimensional datasets and projecting them on a 2-dimensional grid. An important problem of self-organizing maps is the computational expensiveness of their training phase. In this paper, we propose a fast approach to train self-organizing maps. The approach consists of 2 steps. First, a small map identifies the most relevant areas from the entire high-dimensional input space. Then a larger map (initialized from the small one) is fine-tuned to further explore the local areas identified in the first step. 
The resulting map has performance (measured in terms of accuracy and quantization error) on par with self-organizing maps trained with the standard approach, but with a significantly reduced training time.", "fno": "09006055", "keywords": [ "Data Handling", "Self Organising Feature Maps", "Topology", "Unsupervised Learning", "Unsupervised Machine Learning Technique", "High Dimensional Datasets", "2 Dimensional Grid", "High Dimensional Input Space", "Self Organizing Maps Training", "Topological Properties", "Self Organizing Feature Maps", "Training", "Standards", "Buildings", "Neurons", "Quantization Signal", "Complexity Theory", "Self Organizing Maps", "Unsupervised Learning", "Fast Training", "Clustering" ], "authors": [ { "affiliation": "Politecnico di Torino,Department of Control and Computer Engineering,Turin,Italy", "fullName": "Flavio Giobergia", "givenName": "Flavio", "surname": "Giobergia", "__typename": "ArticleAuthorType" }, { "affiliation": "Politecnico di Torino,Department of Control and Computer Engineering,Turin,Italy", "fullName": "Elena Baralis", "givenName": "Elena", "surname": "Baralis", "__typename": "ArticleAuthorType" } ], "idPrefix": "big-data", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-12-01T00:00:00", "pubType": "proceedings", "pages": "2257-2266", "year": "2019", "issn": null, "isbn": "978-1-7281-0858-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09006284", "articleId": "1hJsnmuu3NC", "__typename": "AdjacentArticleType" }, "next": { "fno": "09005492", "articleId": "1hJrXIRosWQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ijcnn/2000/0619/4/06194259", "title": "Self-Organizing Maps in Adaptive Health Monitoring", "doi": null, "abstractUrl": "/proceedings-article/ijcnn/2000/06194259/12OmNrJAdXT", 
"parentPublication": { "id": "proceedings/ijcnn/2000/0619/4", "title": "Neural Networks, IEEE - INNS - ENNS International Joint Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/delta/2002/1453/0/14530321", "title": "On the Initialization and Training Methods for Kohonen Self-Organizing Feature Maps in Color Image Quantization", "doi": null, "abstractUrl": "/proceedings-article/delta/2002/14530321/12OmNvoFjQ1", "parentPublication": { "id": "proceedings/delta/2002/1453/0", "title": "Proceedings First IEEE International Workshop on Electronic Design, Test and Applications '2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2014/3187/0/06822225", "title": "Application of Self-Organizing Maps at Change Detection in Amazon Forest", "doi": null, "abstractUrl": "/proceedings-article/itng/2014/06822225/12OmNx0A7Mt", "parentPublication": { "id": "proceedings/itng/2014/3187/0", "title": "2014 Eleventh International Conference on Information Technology: New Generations (ITNG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/synasc/2015/0461/0/0461a468", "title": "Unsupervised Aspect Level Sentiment Analysis Using Self-Organizing Maps", "doi": null, "abstractUrl": "/proceedings-article/synasc/2015/0461a468/12OmNx7XH7B", "parentPublication": { "id": "proceedings/synasc/2015/0461/0", "title": "2015 17th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsul/2009/3976/0/3976a125", "title": "Self Organizing Maps for AUVs Mapping", "doi": null, "abstractUrl": "/proceedings-article/mcsul/2009/3976a125/12OmNxbW4Pd", "parentPublication": { "id": "proceedings/mcsul/2009/3976/0", "title": "Computational Modeling, Southern Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2011/4367/0/4367a839", "title": "Patent Service Self-Organizing Maps", "doi": null, "abstractUrl": "/proceedings-article/itng/2011/4367a839/12OmNyQYt80", "parentPublication": { "id": "proceedings/itng/2011/4367/0", "title": "Information Technology: New Generations, Third International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbr-lars/2012/4906/0/4906a302", "title": "Parallel High Dimensional Self Organizing Maps Using CUDA", "doi": null, "abstractUrl": "/proceedings-article/sbr-lars/2012/4906a302/12OmNz2kqmP", "parentPublication": { "id": "proceedings/sbr-lars/2012/4906/0", "title": "Brazilian Robotics Symposium and Latin American Robotics Symposium (SBR-LARS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2018/7449/0/744900a908", "title": "NP-SOM: Network Programmable Self-Organizing Maps", "doi": null, "abstractUrl": "/proceedings-article/ictai/2018/744900a908/17D45Wuc33z", "parentPublication": { "id": "proceedings/ictai/2018/7449/0", "title": "2018 IEEE 30th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2018/6956/0/695600a261", "title": "Physical Fitness Clustering Analysis Based on Self-Organizing Feature Maps Network", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2018/695600a261/1dUo4xvG6yI", "parentPublication": { "id": "proceedings/icnisc/2018/6956/0", "title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asap/2020/7147/0/09153224", "title": "A New Hardware Approach to Self-Organizing Maps", "doi": null, "abstractUrl": 
"/proceedings-article/asap/2020/09153224/1lUFnur22IM", "parentPublication": { "id": "proceedings/asap/2020/7147/0", "title": "2020 IEEE 31st International Conference on Application-specific Systems, Architectures and Processors (ASAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrkjVbR", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNAle6Eb", "doi": "10.1109/ICME.2013.6607493", "title": "A novel approach for partial blur detection and segmentation", "normalizedTitle": "A novel approach for partial blur detection and segmentation", "abstract": "This paper proposes a novel approach for partial blur detection and segmentation. The local blur kernels of image blocks are firstly estimated and then a reblurring technique is used to measure relative blur degrees of the local blur kernels. The output of reblurring is a metric to classify blurred and non-blurred image blocks. Furthermore, block-based and pixel-based techniques are incorporated for a fine segmentation of blurred and non-blurred regions. Our approach is evaluated for out-of-focus and motion blurred images. The experimental results show that the proposed approach detects and segments the blurred and non-blurred regions in partial blurred images with 88% accuracy for natural out-of-focus blur, 86% accuracy for artificial out-of-focus blur and 83% accuracy for artificial motion blur, which outperforms the state-of-the-art approaches of partial blur detection and segmentation.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a novel approach for partial blur detection and segmentation. The local blur kernels of image blocks are firstly estimated and then a reblurring technique is used to measure relative blur degrees of the local blur kernels. The output of reblurring is a metric to classify blurred and non-blurred image blocks. Furthermore, block-based and pixel-based techniques are incorporated for a fine segmentation of blurred and non-blurred regions. Our approach is evaluated for out-of-focus and motion blurred images. 
The experimental results show that the proposed approach detects and segments the blurred and non-blurred regions in partial blurred images with 88% accuracy for natural out-of-focus blur, 86% accuracy for artificial out-of-focus blur and 83% accuracy for artificial motion blur, which outperforms the state-of-the-art approaches of partial blur detection and segmentation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a novel approach for partial blur detection and segmentation. The local blur kernels of image blocks are firstly estimated and then a reblurring technique is used to measure relative blur degrees of the local blur kernels. The output of reblurring is a metric to classify blurred and non-blurred image blocks. Furthermore, block-based and pixel-based techniques are incorporated for a fine segmentation of blurred and non-blurred regions. Our approach is evaluated for out-of-focus and motion blurred images. The experimental results show that the proposed approach detects and segments the blurred and non-blurred regions in partial blurred images with 88% accuracy for natural out-of-focus blur, 86% accuracy for artificial out-of-focus blur and 83% accuracy for artificial motion blur, which outperforms the state-of-the-art approaches of partial blur detection and segmentation.", "fno": "06607493", "keywords": [ "Kernel", "Image Segmentation", "Motion Segmentation", "Accuracy", "Convolution", "Shape", "Databases", "Blur Segmentation", "Blurred Image", "Blur Kernel", "Partial Blur Detection" ], "authors": [ { "affiliation": "Nanyang Technological University, Singapore", "fullName": "Khosro Bahrami", "givenName": "Khosro", "surname": "Bahrami", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University, Singapore", "fullName": "Alex C. 
Kot", "givenName": "Alex C.", "surname": "Kot", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University, Singapore", "fullName": "Jiayuan Fan", "givenName": null, "surname": "Jiayuan Fan", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2013", "issn": "1945-7871", "isbn": "978-1-4799-0015-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06607492", "articleId": "12OmNBKEyBc", "__typename": "AdjacentArticleType" }, "next": { "fno": "06607494", "articleId": "12OmNqNos8m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2008/2242/0/04587465", "title": "Image partial blur detection and classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587465/12OmNAYXWHY", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118c965", "title": "Discriminative Blur Detection Features", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118c965/12OmNBDyA7e", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/219P2B18", "title": "Seeing through the blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/219P2B18/12OmNBpVPWe", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543258", "title": "Removing motion blur from barcode images", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543258/12OmNC8dgfA", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851b846", "title": "Parametric Object Motion from Blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851b846/12OmNrMHOlr", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851a459", "title": "Soft-Segmentation Guided Object Motion Deblurring", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851a459/12OmNrYCXYD", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206711", "title": "High-quality curvelet-based motion deblurring from an image pair", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206711/12OmNzahbTe", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a702", "title": "Localized Image Blur Removal through Non-parametric Kernel Estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a702/12OmNzsJ7y6", "parentPublication": { "id": 
"proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/06/06127874", "title": "A Blur-Robust Descriptor with Applications to Face Recognition", "doi": null, "abstractUrl": "/journal/tp/2012/06/06127874/13rRUILtJAW", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccia/2019/2128/0/212800a063", "title": "Blur Identification of the Degraded Images Based on Convolutional Neural Network", "doi": null, "abstractUrl": "/proceedings-article/iccia/2019/212800a063/1f8MFqFnpjG", "parentPublication": { "id": "proceedings/iccia/2019/2128/0", "title": "2019 4th International Conference on Computational Intelligence and Applications (ICCIA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwdbUZW", "title": "2009 8th IEEE International Symposium on Mixed and Augmented Reality", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNBLdKMB", "doi": "10.1109/ISMAR.2009.5336480", "title": "ESM-Blur: Handling & rendering blur in 3D tracking and augmentation", "normalizedTitle": "ESM-Blur: Handling & rendering blur in 3D tracking and augmentation", "abstract": "The contribution of this paper is two-fold. First, we show how to extend the ESM algorithm to handle motion blur in 3D object tracking. ESM is a powerful algorithm for template matching-based tracking, but it can fail under motion blur. We introduce an image formation model that explicitly considers the possibility of blur, and show it results in a generalization of the original ESM algorithm. This allows to converge faster, more accurately and more robustly even under large amount of blur. Our second contribution is an efficient method for rendering the virtual objects under the estimated motion blur. It renders two images of the object under 3D perspective, and warps them to create many intermediate images. By fusing these images we obtain a final image for the virtual objects blurred consistently with the captured image. Because warping is much faster that 3D rendering, we can create realistically blurred images at a very low computational cost.", "abstracts": [ { "abstractType": "Regular", "content": "The contribution of this paper is two-fold. First, we show how to extend the ESM algorithm to handle motion blur in 3D object tracking. ESM is a powerful algorithm for template matching-based tracking, but it can fail under motion blur. We introduce an image formation model that explicitly considers the possibility of blur, and show it results in a generalization of the original ESM algorithm. 
This allows to converge faster, more accurately and more robustly even under large amount of blur. Our second contribution is an efficient method for rendering the virtual objects under the estimated motion blur. It renders two images of the object under 3D perspective, and warps them to create many intermediate images. By fusing these images we obtain a final image for the virtual objects blurred consistently with the captured image. Because warping is much faster that 3D rendering, we can create realistically blurred images at a very low computational cost.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The contribution of this paper is two-fold. First, we show how to extend the ESM algorithm to handle motion blur in 3D object tracking. ESM is a powerful algorithm for template matching-based tracking, but it can fail under motion blur. We introduce an image formation model that explicitly considers the possibility of blur, and show it results in a generalization of the original ESM algorithm. This allows to converge faster, more accurately and more robustly even under large amount of blur. Our second contribution is an efficient method for rendering the virtual objects under the estimated motion blur. It renders two images of the object under 3D perspective, and warps them to create many intermediate images. By fusing these images we obtain a final image for the virtual objects blurred consistently with the captured image. 
Because warping is much faster that 3D rendering, we can create realistically blurred images at a very low computational cost.", "fno": "05336480", "keywords": [], "authors": [ { "affiliation": "GIST, U-VR Lab, Korea", "fullName": "Youngmin Park", "givenName": "Youngmin", "surname": "Park", "__typename": "ArticleAuthorType" }, { "affiliation": "EPFL, CVLab, Korea", "fullName": "Vincent Lepetit", "givenName": "Vincent", "surname": "Lepetit", "__typename": "ArticleAuthorType" }, { "affiliation": "GIST, U-VR Lab, Korea", "fullName": "Woontack Woo", "givenName": "Woontack", "surname": "Woo", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-10-01T00:00:00", "pubType": "proceedings", "pages": "163-166", "year": "2009", "issn": null, "isbn": "978-1-4244-5390-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05336489", "articleId": "12OmNz3bdGU", "__typename": "AdjacentArticleType" }, "next": { "fno": "05336478", "articleId": "12OmNvjgWFv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2008/2242/0/04587465", "title": "Image partial blur detection and classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587465/12OmNAYXWHY", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607493", "title": "A novel approach for partial blur detection and segmentation", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607493/12OmNAle6Eb", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo 
(ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587582", "title": "Motion from blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587582/12OmNBU1jFx", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775692", "title": "Motion Blur Identification Using Image derivative", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775692/12OmNrkjVip", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2005/2372/2/237220018", "title": "Visual Tracking in the Presence of Motion Blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2005/237220018/12OmNwe2IoC", "parentPublication": { "id": "proceedings/cvpr/2005/2372/2", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2000/0813/0/08130022", "title": "Restoration of Multiple Images with Motion Blur in Different Directions", "doi": null, "abstractUrl": "/proceedings-article/wacv/2000/08130022/12OmNx5GU0K", "parentPublication": { "id": "proceedings/wacv/2000/0813/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2015/6964/0/07299159", "title": "Handling motion blur in multi-frame super-resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07299159/12OmNzAoi1R", "parentPublication": { "id": 
"proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/09/06025351", "title": "Handling Motion-Blur in 3D Tracking and Rendering for Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2012/09/06025351/13rRUxAAT0Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccia/2019/2128/0/212800a063", "title": "Blur Identification of the Degraded Images Based on Convolutional Neural Network", "doi": null, "abstractUrl": "/proceedings-article/iccia/2019/212800a063/1f8MFqFnpjG", "parentPublication": { "id": "proceedings/iccia/2019/2128/0", "title": "2019 4th International Conference on Computational Intelligence and Applications (ICCIA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900b706", "title": "Improved Handling of Motion Blur in Online Object Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900b706/1yeMmuCia9q", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwekjuI", "title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems", "acronym": "sitis", "groupId": "1002425", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNBhHt8d", "doi": "10.1109/SITIS.2008.38", "title": "Image Extrema Analysis and Blur Detection with Identification", "normalizedTitle": "Image Extrema Analysis and Blur Detection with Identification", "abstract": "In real image processing applications, images may be blurred or not. When blur is present, the type and degree of degradation vary from one image to another. The process of restoring these images are usually computationally demanding so that there is a need to first detect blurs. If an image is not blurred then it need not undergo the restoration process. In this work, a novel algorithm that simultaneously detects and identifies blurs, is proposed. This method is based on the analysis of extrema values in an image. The extrema histograms are first constructed then analyzed in order to extract feature values. The distinctness of these values in the presence of blur is used. It is computationally simple and fast thereby making it suitable for preprocessing especially in practical imaging applications. Experimental results on natural images and its synthetically blurred versions show the validity of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "In real image processing applications, images may be blurred or not. When blur is present, the type and degree of degradation vary from one image to another. The process of restoring these images are usually computationally demanding so that there is a need to first detect blurs. If an image is not blurred then it need not undergo the restoration process. In this work, a novel algorithm that simultaneously detects and identifies blurs, is proposed. 
This method is based on the analysis of extrema values in an image. The extrema histograms are first constructed then analyzed in order to extract feature values. The distinctness of these values in the presence of blur is used. It is computationally simple and fast thereby making it suitable for preprocessing especially in practical imaging applications. Experimental results on natural images and its synthetically blurred versions show the validity of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In real image processing applications, images may be blurred or not. When blur is present, the type and degree of degradation vary from one image to another. The process of restoring these images are usually computationally demanding so that there is a need to first detect blurs. If an image is not blurred then it need not undergo the restoration process. In this work, a novel algorithm that simultaneously detects and identifies blurs, is proposed. This method is based on the analysis of extrema values in an image. The extrema histograms are first constructed then analyzed in order to extract feature values. The distinctness of these values in the presence of blur is used. It is computationally simple and fast thereby making it suitable for preprocessing especially in practical imaging applications. 
Experimental results on natural images and its synthetically blurred versions show the validity of the proposed method.", "fno": "3493a320", "keywords": [ "Extrema Analysis", "Blur Detection", "Blur Identification" ], "authors": [ { "affiliation": null, "fullName": "Rachel Mabanag Chong", "givenName": "Rachel Mabanag", "surname": "Chong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Toshihisa Tanaka", "givenName": "Toshihisa", "surname": "Tanaka", "__typename": "ArticleAuthorType" } ], "idPrefix": "sitis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-11-01T00:00:00", "pubType": "proceedings", "pages": "320-326", "year": "2008", "issn": null, "isbn": "978-0-7695-3493-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3493a314", "articleId": "12OmNx7ouXA", "__typename": "AdjacentArticleType" }, "next": { "fno": "3493a327", "articleId": "12OmNAqCtLn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2008/2242/0/04587465", "title": "Image partial blur detection and classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587465/12OmNAYXWHY", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682346", "title": "A No-Reference Quality Metric for Blur Image", "doi": null, "abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682346/12OmNBtl1zv", "parentPublication": { "id": "proceedings/greencom-ithingscpscom/2013/5046/0", "title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, 
Physical and Social Computing(CPSCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/1/3119a467", "title": "Blind Image Quality Assessment for Measuring Image Blur", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119a467/12OmNqJ8tbn", "parentPublication": { "id": "proceedings/cisp/2008/3119/1", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775692", "title": "Motion Blur Identification Using Image derivative", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775692/12OmNrkjVip", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2010/4225/1/4225a116", "title": "Image-blur-based Robust Weed Recognition", "doi": null, "abstractUrl": "/proceedings-article/aici/2010/4225a116/12OmNvSbBA3", "parentPublication": { "id": "proceedings/aici/2010/4225/1", "title": "Artificial Intelligence and Computational Intelligence, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mines/2011/4559/0/4559a041", "title": "Effective Pretreatment in Identification of Motion-Blur Direction", "doi": null, "abstractUrl": "/proceedings-article/mines/2011/4559a041/12OmNwCsdP2", "parentPublication": { "id": "proceedings/mines/2011/4559/0", "title": "Multimedia Information Networking and Security, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2011/1101/0/06126357", "title": "Blurred target tracking by Blur-driven Tracker", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2011/06126357/12OmNyr8Yed", "parentPublication": { "id": "proceedings/iccv/2011/1101/0", "title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1996/7258/0/72580027", "title": "Space Scale Localization, Blur, and Contour-Based Image Coding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1996/72580027/12OmNzBOihE", "parentPublication": { "id": "proceedings/cvpr/1996/7258/0", "title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a702", "title": "Localized Image Blur Removal through Non-parametric Kernel Estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a702/12OmNzsJ7y6", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2005/9313/0/01577212", "title": "Motion blur identification in noisy images using fuzzy sets", "doi": null, "abstractUrl": "/proceedings-article/isspit/2005/01577212/12OmNzsrwhD", "parentPublication": { "id": "proceedings/isspit/2005/9313/0", "title": "2005 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx8wTeX", "title": "Artificial Intelligence and Computational Intelligence, International Conference on", "acronym": "aici", "groupId": "1003069", "volume": "1", "displayVolume": "1", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNvSbBA3", "doi": "10.1109/AICI.2010.31", "title": "Image-blur-based Robust Weed Recognition", "normalizedTitle": "Image-blur-based Robust Weed Recognition", "abstract": "Image motion blur and defocus blur often occur when there is a relative motion between the imaging camera and the detected object. These two blurs will degrade the image quality and will also decrease the subsequent pattern recognition accuracy. In this paper, we propose a robust weed recognition scheme using the low quality color weed images with the above-mentioned image blurs. The proposed scheme consists of three steps. First, image matte is used to segment the soil and the plant. Second, a generative learning method is introduced in the training step to simulate blurred images by controlling blur parameters. Finally, weed recognition is performed by using the blurred color information based on the subspace method. We have experimentally proved that the effective use of image blurs improves the recognition accuracy of camera-captured weeds.", "abstracts": [ { "abstractType": "Regular", "content": "Image motion blur and defocus blur often occur when there is a relative motion between the imaging camera and the detected object. These two blurs will degrade the image quality and will also decrease the subsequent pattern recognition accuracy. In this paper, we propose a robust weed recognition scheme using the low quality color weed images with the above-mentioned image blurs. The proposed scheme consists of three steps. First, image matte is used to segment the soil and the plant. Second, a generative learning method is introduced in the training step to simulate blurred images by controlling blur parameters. 
Finally, weed recognition is performed by using the blurred color information based on the subspace method. We have experimentally proved that the effective use of image blurs improves the recognition accuracy of camera-captured weeds.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Image motion blur and defocus blur often occur when there is a relative motion between the imaging camera and the detected object. These two blurs will degrade the image quality and will also decrease the subsequent pattern recognition accuracy. In this paper, we propose a robust weed recognition scheme using the low quality color weed images with the above-mentioned image blurs. The proposed scheme consists of three steps. First, image matte is used to segment the soil and the plant. Second, a generative learning method is introduced in the training step to simulate blurred images by controlling blur parameters. Finally, weed recognition is performed by using the blurred color information based on the subspace method. 
We have experimentally proved that the effective use of image blurs improves the recognition accuracy of camera-captured weeds.", "fno": "4225a116", "keywords": [ "Pattern Recognition", "Weed Recognition", "Motion Blur", "Defocus Blur", "Image Matte" ], "authors": [ { "affiliation": null, "fullName": "Zhao Peng", "givenName": "Zhao", "surname": "Peng", "__typename": "ArticleAuthorType" } ], "idPrefix": "aici", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "116-119", "year": "2010", "issn": null, "isbn": "978-0-7695-4225-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4225a111", "articleId": "12OmNyrqzDt", "__typename": "AdjacentArticleType" }, "next": { "fno": "4225a120", "articleId": "12OmNznkKcM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sitis/2008/3493/0/3493a320", "title": "Image Extrema Analysis and Blur Detection with Identification", "doi": null, "abstractUrl": "/proceedings-article/sitis/2008/3493a320/12OmNBhHt8d", "parentPublication": { "id": "proceedings/sitis/2008/3493/0", "title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdip/2009/3565/0/3565a255", "title": "Edge Link Detector Based Weed Classifier", "doi": null, "abstractUrl": "/proceedings-article/icdip/2009/3565a255/12OmNBqMDwO", "parentPublication": { "id": "proceedings/icdip/2009/3565/0", "title": "Digital Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isda/2008/3382/2/3382b277", "title": "A Face Image Database for Evaluating Out-of-Focus Blur", "doi": null, "abstractUrl": 
"/proceedings-article/isda/2008/3382b277/12OmNrYCXPV", "parentPublication": { "id": "proceedings/isda/2008/3382/2", "title": "2008 Eighth International Conference on Intelligent Systems Design and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775692", "title": "Motion Blur Identification Using Image derivative", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775692/12OmNrkjVip", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmsp/2011/4356/1/4356a156", "title": "Robust Weed Recognition Using Blur Moment Invariants", "doi": null, "abstractUrl": "/proceedings-article/cmsp/2011/4356a156/12OmNwD1pXl", "parentPublication": { "id": "proceedings/cmsp/2011/4356/1", "title": "Multimedia and Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icetc/2009/3609/0/3609a224", "title": "Weed Recognition Based on Erosion and Dilation Segmentation Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icetc/2009/3609a224/12OmNxj235n", "parentPublication": { "id": "proceedings/icetc/2009/3609/0", "title": "2009 International Conference on Education Technology and Computer, ICETC 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kam/2009/3888/1/3888a375", "title": "Weed Seeds Recognition Using Color PCA", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888a375/12OmNy5hRel", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/06/06127874", "title": "A Blur-Robust Descriptor with 
Applications to Face Recognition", "doi": null, "abstractUrl": "/journal/tp/2012/06/06127874/13rRUILtJAW", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1998/06/i0652", "title": "Recovering Affine Motion and Defocus Blur Simultaneously", "doi": null, "abstractUrl": "/journal/tp/1998/06/i0652/13rRUxC0SF6", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b771", "title": "Deep Embeddings-based Place Recognition Robust to Motion Blur", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b771/1yNhsObYWRO", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCaLEnp", "title": "Multimedia Information Networking and Security, International Conference on", "acronym": "mines", "groupId": "1003021", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNwCsdP2", "doi": "10.1109/MINES.2011.20", "title": "Effective Pretreatment in Identification of Motion-Blur Direction", "normalizedTitle": "Effective Pretreatment in Identification of Motion-Blur Direction", "abstract": "A motion-blur image has special blurred information which can be dealt to estimate its blurred direction. There are several methods to identify the direction such as based on directional derivation and weighted average [1]. The blurred image can't recognized smoothly and timely in the area of industrial machine vision. This paper provides an effective pretreatment method to indentify the motion-blur direction related to the high frequency of the image. This method satisfies the high-speed requirements in industrial machine vision. Find the interval of the direction, then used the optimized method based on direction of differential coefficient to find the direction of the motion-blur image. The merit of this method is increased the convergence speed and save the time complexity.", "abstracts": [ { "abstractType": "Regular", "content": "A motion-blur image has special blurred information which can be dealt to estimate its blurred direction. There are several methods to identify the direction such as based on directional derivation and weighted average [1]. The blurred image can't recognized smoothly and timely in the area of industrial machine vision. This paper provides an effective pretreatment method to indentify the motion-blur direction related to the high frequency of the image. This method satisfies the high-speed requirements in industrial machine vision. 
Find the interval of the direction, then used the optimized method based on direction of differential coefficient to find the direction of the motion-blur image. The merit of this method is increased the convergence speed and save the time complexity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A motion-blur image has special blurred information which can be dealt to estimate its blurred direction. There are several methods to identify the direction such as based on directional derivation and weighted average [1]. The blurred image can't recognized smoothly and timely in the area of industrial machine vision. This paper provides an effective pretreatment method to indentify the motion-blur direction related to the high frequency of the image. This method satisfies the high-speed requirements in industrial machine vision. Find the interval of the direction, then used the optimized method based on direction of differential coefficient to find the direction of the motion-blur image. 
The merit of this method is increased the convergence speed and save the time complexity.", "fno": "4559a041", "keywords": [ "Motion Blur", "Machine Vision", "Image Recognition", "High Frequency", "Direction Of Differential Coefficient" ], "authors": [ { "affiliation": null, "fullName": "Xia Yubin", "givenName": "Xia", "surname": "Yubin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Meng Fanbin", "givenName": "Meng", "surname": "Fanbin", "__typename": "ArticleAuthorType" } ], "idPrefix": "mines", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-11-01T00:00:00", "pubType": "proceedings", "pages": "41-45", "year": "2011", "issn": null, "isbn": "978-0-7695-4559-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4559a037", "articleId": "12OmNz2kqff", "__typename": "AdjacentArticleType" }, "next": { "fno": "4559a046", "articleId": "12OmNxWcH2w", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2008/2242/0/04587582", "title": "Motion from blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587582/12OmNBU1jFx", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775692", "title": "Motion Blur Identification Using Image derivative", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775692/12OmNrkjVip", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. 
ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2010/4225/1/4225a116", "title": "Image-blur-based Robust Weed Recognition", "doi": null, "abstractUrl": "/proceedings-article/aici/2010/4225a116/12OmNvSbBA3", "parentPublication": { "id": "proceedings/aici/2010/4225/1", "title": "Artificial Intelligence and Computational Intelligence, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2000/0813/0/08130022", "title": "Restoration of Multiple Images with Motion Blur in Different Directions", "doi": null, "abstractUrl": "/proceedings-article/wacv/2000/08130022/12OmNx5GU0K", "parentPublication": { "id": "proceedings/wacv/2000/0813/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/221P2B20", "title": "Optical flow in the presence of spatially-varying motion blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/221P2B20/12OmNyQYtvR", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/1/3583a453", "title": "Direction Parameter Identification of Motion-Blurred Image Based on Three Second Order Frequency Moments", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583a453/12OmNzZmZrV", "parentPublication": { "id": "proceedings/icmtma/2009/3583/3", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2005/9313/0/01577212", "title": "Motion blur identification in noisy images using fuzzy sets", "doi": null, "abstractUrl": 
"/proceedings-article/isspit/2005/01577212/12OmNzsrwhD", "parentPublication": { "id": "proceedings/isspit/2005/9313/0", "title": "2005 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/08/ttp2011081603", "title": "Richardson-Lucy Deblurring for Scenes under a Projective Motion Path", "doi": null, "abstractUrl": "/journal/tp/2011/08/ttp2011081603/13rRUIM2VIc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/09/06025351", "title": "Handling Motion-Blur in 3D Tracking and Rendering for Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2012/09/06025351/13rRUxAAT0Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/11/ttp2011112329", "title": "Motion Regularization for Matting Motion Blurred Objects", "doi": null, "abstractUrl": "/journal/tp/2011/11/ttp2011112329/13rRUytF42E", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzSh1be", "title": "Applications of Computer Vision, IEEE Workshop on", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2000", "__typename": "ProceedingType" }, "article": { "id": "12OmNx5GU0K", "doi": "10.1109/WACV.2000.895398", "title": "Restoration of Multiple Images with Motion Blur in Different Directions", "normalizedTitle": "Restoration of Multiple Images with Motion Blur in Different Directions", "abstract": "Images degraded by motion blur can be restored when several blurred images are given, and the direction of motion blur in each image is different. Given two motion blurred images, best restoration is obtained when the directions of motion blur in the two images are orthogonal. Motion blur at different directions is common, for example, in the case of small hand-held digital cameras due to fast hand trembling and the light weight of the camera. Restoration examples are given on simulated data as well as on images with real motion blur.", "abstracts": [ { "abstractType": "Regular", "content": "Images degraded by motion blur can be restored when several blurred images are given, and the direction of motion blur in each image is different. Given two motion blurred images, best restoration is obtained when the directions of motion blur in the two images are orthogonal. Motion blur at different directions is common, for example, in the case of small hand-held digital cameras due to fast hand trembling and the light weight of the camera. Restoration examples are given on simulated data as well as on images with real motion blur.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Images degraded by motion blur can be restored when several blurred images are given, and the direction of motion blur in each image is different. Given two motion blurred images, best restoration is obtained when the directions of motion blur in the two images are orthogonal. 
Motion blur at different directions is common, for example, in the case of small hand-held digital cameras due to fast hand trembling and the light weight of the camera. Restoration examples are given on simulated data as well as on images with real motion blur.", "fno": "08130022", "keywords": [], "authors": [ { "affiliation": "The Hebrew University of Jerusalem", "fullName": "Alex Rav-Acha", "givenName": "Alex", "surname": "Rav-Acha", "__typename": "ArticleAuthorType" }, { "affiliation": "The Hebrew University of Jerusalem", "fullName": "Shmuel Peleg", "givenName": "Shmuel", "surname": "Peleg", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2000-12-01T00:00:00", "pubType": "proceedings", "pages": "22", "year": "2000", "issn": null, "isbn": "0-7695-0813-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08130016", "articleId": "12OmNCdk2EK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08130029", "articleId": "12OmNqFrGHp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2008/2242/0/04587465", "title": "Image partial blur detection and classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587465/12OmNAYXWHY", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587582", "title": "Motion from blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587582/12OmNBU1jFx", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icacc/2009/3516/0/3516a327", "title": "RBFN Based Motion Blur Parameter Estimation", "doi": null, "abstractUrl": "/proceedings-article/icacc/2009/3516a327/12OmNrJiCMB", "parentPublication": { "id": "proceedings/icacc/2009/3516/0", "title": "2009 International Conference on Advanced Computer Control", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775692", "title": "Motion Blur Identification Using Image derivative", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775692/12OmNrkjVip", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsee/2012/4647/2/4647b554", "title": "Approach to Optimizing Restoration of Motion Blur Images with Robust Blind Deconvolution Based on New Cepstrum and Total Variation", "doi": null, "abstractUrl": "/proceedings-article/iccsee/2012/4647b554/12OmNvAiSsd", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2001/7041/3/00941301", "title": "Simultaneous image formation and motion blur restoration via multiple capture", "doi": null, "abstractUrl": "/proceedings-article/icassp/2001/00941301/12OmNvxbhK7", "parentPublication": { "id": "proceedings/icassp/2001/7041/3", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/2/4252b714", "title": "Unified Restoration Method for Different Degraded Images", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252b714/12OmNyKa63B", "parentPublication": { "id": null, 
"title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2016/1269/0/07760027", "title": "Novel method to assess motion blur kernel parameters and comparative study of restoration techniques using different image layouts", "doi": null, "abstractUrl": "/proceedings-article/iciev/2016/07760027/12OmNzBOhJ9", "parentPublication": { "id": "proceedings/iciev/2016/1269/0", "title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ita/2013/2877/0/2876a072", "title": "The Implementation on the Restoration of Motion-Blurred Images of Ochotona Curzoniae", "doi": null, "abstractUrl": "/proceedings-article/ita/2013/2876a072/12OmNzVXNOE", "parentPublication": { "id": "proceedings/ita/2013/2877/0", "title": "2013 International Conference on Information Technology and Applications (ITA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccia/2019/2128/0/212800a063", "title": "Blur Identification of the Degraded Images Based on Convolutional Neural Network", "doi": null, "abstractUrl": "/proceedings-article/iccia/2019/212800a063/1f8MFqFnpjG", "parentPublication": { "id": "proceedings/iccia/2019/2128/0", "title": "2019 4th International Conference on Computational Intelligence and Applications (ICCIA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBkfRhw", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNzAoi1R", "doi": "10.1109/CVPR.2015.7299159", "title": "Handling motion blur in multi-frame super-resolution", "normalizedTitle": "Handling motion blur in multi-frame super-resolution", "abstract": "Ubiquitous motion blur easily fails multi-frame super-resolution (MFSR). Our method proposed in this paper tackles this issue by optimally searching least blurred pixels in MFSR. An EM framework is proposed to guide residual blur estimation and high-resolution image reconstruction. To suppress noise, we employ a family of sparse penalties as natural image priors, along with an effective solver. Theoretical analysis is performed on how and when our method works. The relationship between estimation errors of motion blur and the quality of input images is discussed. Our method produces sharp and higher-resolution results given input of challenging low-resolution noisy and blurred sequences.", "abstracts": [ { "abstractType": "Regular", "content": "Ubiquitous motion blur easily fails multi-frame super-resolution (MFSR). Our method proposed in this paper tackles this issue by optimally searching least blurred pixels in MFSR. An EM framework is proposed to guide residual blur estimation and high-resolution image reconstruction. To suppress noise, we employ a family of sparse penalties as natural image priors, along with an effective solver. Theoretical analysis is performed on how and when our method works. The relationship between estimation errors of motion blur and the quality of input images is discussed. 
Our method produces sharp and higher-resolution results given input of challenging low-resolution noisy and blurred sequences.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Ubiquitous motion blur easily fails multi-frame super-resolution (MFSR). Our method proposed in this paper tackles this issue by optimally searching least blurred pixels in MFSR. An EM framework is proposed to guide residual blur estimation and high-resolution image reconstruction. To suppress noise, we employ a family of sparse penalties as natural image priors, along with an effective solver. Theoretical analysis is performed on how and when our method works. The relationship between estimation errors of motion blur and the quality of input images is discussed. Our method produces sharp and higher-resolution results given input of challenging low-resolution noisy and blurred sequences.", "fno": "07299159", "keywords": [], "authors": [ { "affiliation": "University of Chinese Academy of Sciences & State Key Lab. of Computer Science, Inst. of Software, CAS, China", "fullName": "Ziyang Ma", "givenName": null, "surname": "Ziyang Ma", "__typename": "ArticleAuthorType" }, { "affiliation": "The Chinese University of Hong Kong, China", "fullName": "Renjie Liao", "givenName": null, "surname": "Renjie Liao", "__typename": "ArticleAuthorType" }, { "affiliation": "The Chinese University of Hong Kong, China", "fullName": "Xin Tao", "givenName": null, "surname": "Xin Tao", "__typename": "ArticleAuthorType" }, { "affiliation": "The Chinese University of Hong Kong, China", "fullName": "Li Xu", "givenName": "Li", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "The Chinese University of Hong Kong, China", "fullName": "Jiaya Jia", "givenName": "Jiaya", "surname": "Jia", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Chinese Academy of Sciences & State Key Lab. of Computer Science, Inst. 
of Software, CAS, China", "fullName": "Enhua Wu", "givenName": null, "surname": "Enhua Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-06-01T00:00:00", "pubType": "proceedings", "pages": "5224-5232", "year": "2015", "issn": "1063-6919", "isbn": "978-1-4673-6964-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07299158", "articleId": "12OmNzxPTJD", "__typename": "AdjacentArticleType" }, "next": { "fno": "07299160", "articleId": "12OmNB7LvCs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2008/2242/0/04587465", "title": "Image partial blur detection and classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587465/12OmNAYXWHY", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2013/2840/0/2840c832", "title": "Accurate Blur Models vs. 
Image Priors in Single Image Super-resolution", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840c832/12OmNB0X8qY", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2009/5390/0/05336480", "title": "ESM-Blur: Handling & rendering blur in 3D tracking and augmentation", "doi": null, "abstractUrl": "/proceedings-article/ismar/2009/05336480/12OmNBLdKMB", "parentPublication": { "id": "proceedings/ismar/2009/5390/0", "title": "2009 8th IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587582", "title": "Motion from blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587582/12OmNBU1jFx", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2001/1272/1/127210645", "title": "Robust Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2001/127210645/12OmNqFJhDL", "parentPublication": { "id": "proceedings/cvpr/2001/1272/1", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775692", "title": "Motion Blur Identification Using Image derivative", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775692/12OmNrkjVip", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. 
ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/02/ttp2014020346", "title": "On Bayesian Adaptive Video Super Resolution", "doi": null, "abstractUrl": "/journal/tp/2014/02/ttp2014020346/13rRUEgs2uz", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/09/06025351", "title": "Handling Motion-Blur in 3D Tracking and Rendering for Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2012/09/06025351/13rRUxAAT0Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b797", "title": "Deep Model-Based Super-Resolution with Non-uniform Blur", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b797/1L6LDSXDh7i", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900b706", "title": "Improved Handling of Motion Blur in Online Object Detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900b706/1yeMmuCia9q", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxbW4OV", "title": "2012 IEEE Ninth International Conference on Advanced Video and Signal-Based Surveillance", "acronym": "avss", "groupId": "1001307", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNzTYC0Z", "doi": "10.1109/AVSS.2012.78", "title": "Tracking Blurred Object with Data-Driven Tracker", "normalizedTitle": "Tracking Blurred Object with Data-Driven Tracker", "abstract": "Motion blur is very common in the low quality of image sequences and videos captured by low speed of cameras. Object tracking without accounting for the motion blur would easily fail in these kinds of videos. We propose a new data-driven tracker in the particle filter framework to address this problem without deblurring the image sequences. The motion blur is detected by exploring the property of the blurred input image through Fourier analysis. The appearance model is integrated with a set of motion blur kernels which could reflect different blur effects in real scenes. The motion model is improved to be more robust to sudden motion of the target object. To evaluate the proposed algorithm, several challenging videos with significant motion blur are used in the experiments. The experimental results demonstrate the robustness and accuracy of our algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "Motion blur is very common in the low quality of image sequences and videos captured by low speed of cameras. Object tracking without accounting for the motion blur would easily fail in these kinds of videos. We propose a new data-driven tracker in the particle filter framework to address this problem without deblurring the image sequences. The motion blur is detected by exploring the property of the blurred input image through Fourier analysis. The appearance model is integrated with a set of motion blur kernels which could reflect different blur effects in real scenes. 
The motion model is improved to be more robust to sudden motion of the target object. To evaluate the proposed algorithm, several challenging videos with significant motion blur are used in the experiments. The experimental results demonstrate the robustness and accuracy of our algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Motion blur is very common in the low quality of image sequences and videos captured by low speed of cameras. Object tracking without accounting for the motion blur would easily fail in these kinds of videos. We propose a new data-driven tracker in the particle filter framework to address this problem without deblurring the image sequences. The motion blur is detected by exploring the property of the blurred input image through Fourier analysis. The appearance model is integrated with a set of motion blur kernels which could reflect different blur effects in real scenes. The motion model is improved to be more robust to sudden motion of the target object. To evaluate the proposed algorithm, several challenging videos with significant motion blur are used in the experiments. 
The experimental results demonstrate the robustness and accuracy of our algorithm.", "fno": "4797a331", "keywords": [ "Target Tracking", "Image Sequences", "Algorithm Design And Analysis", "Kernel", "Robustness", "Data Driven", "Object Tracking", "Motion Blur" ], "authors": [ { "affiliation": null, "fullName": "Jianwei Ding", "givenName": "Jianwei", "surname": "Ding", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kaiqi Huang", "givenName": "Kaiqi", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tieniu Tan", "givenName": "Tieniu", "surname": "Tan", "__typename": "ArticleAuthorType" } ], "idPrefix": "avss", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-09-01T00:00:00", "pubType": "proceedings", "pages": "331-336", "year": "2012", "issn": null, "isbn": "978-1-4673-2499-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4797a325", "articleId": "12OmNvStcIu", "__typename": "AdjacentArticleType" }, "next": { "fno": "4797a337", "articleId": "12OmNxxdZJi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032a408", "title": "Flow-Guided Feature Aggregation for Video Object Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a408/12OmNBubOTz", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/1/81831755", "title": "Recovery of blurred video signals using iterative image restoration combined with motion estimation", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81831755/12OmNwe2Iny", "parentPublication": { "id": 
"proceedings/icip/1997/8183/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/221P2B20", "title": "Optical flow in the presence of spatially-varying motion blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/221P2B20/12OmNyQYtvR", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2011/1101/0/06126357", "title": "Blurred target tracking by Blur-driven Tracker", "doi": null, "abstractUrl": "/proceedings-article/iccv/2011/06126357/12OmNyr8Yed", "parentPublication": { "id": "proceedings/iccv/2011/1101/0", "title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118b202", "title": "Multi-forest Tracker: A Chameleon in Tracking", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b202/12OmNzCWG5i", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/1/3583a453", "title": "Direction Parameter Identification of Motion-Blurred Image Based on Three Second Order Frequency Moments", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583a453/12OmNzZmZrV", "parentPublication": { "id": "proceedings/icmtma/2009/3583/3", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/08/ttp2011081603", "title": "Richardson-Lucy Deblurring for Scenes under a 
Projective Motion Path", "doi": null, "abstractUrl": "/journal/tp/2011/08/ttp2011081603/13rRUIM2VIc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/09/06025351", "title": "Handling Motion-Blur in 3D Tracking and Rendering for Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2012/09/06025351/13rRUxAAT0Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2022/1015/0/101500a214", "title": "Deblurred Adversarial Defence For Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/itme/2022/101500a214/1M4rokOuCKk", "parentPublication": { "id": "proceedings/itme/2022/1015/0", "title": "2022 12th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300c300", "title": "Intra-Frame Object Tracking by Deblatting", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300c300/1i5mpfCh3fq", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1f8MEbvTdks", "title": "2019 4th International Conference on Computational Intelligence and Applications (ICCIA)", "acronym": "iccia", "groupId": "1815584", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1f8MFqFnpjG", "doi": "10.1109/ICCIA.2019.00019", "title": "Blur Identification of the Degraded Images Based on Convolutional Neural Network", "normalizedTitle": "Blur Identification of the Degraded Images Based on Convolutional Neural Network", "abstract": "A method of automatic blur identification for the degraded images based on convolutional neural network is presented. There are significant differences in the Fourier spectra of degraded images with different blur types. According to this, the Fourier spectra of a large number of motion blurred images and defocus blurred images which have different blur parameters are used as the training samples to train the convolutional neural network, so that it can recognize the Fourier spectrum corresponding to the degraded image with motion blur or defocus blur. The experimental results show that the trained convolutional neural network can accurately recognize the degraded images of the two blur types with an accuracy of 99%, which lays the foundation for subsequent image restoration.", "abstracts": [ { "abstractType": "Regular", "content": "A method of automatic blur identification for the degraded images based on convolutional neural network is presented. There are significant differences in the Fourier spectra of degraded images with different blur types. According to this, the Fourier spectra of a large number of motion blurred images and defocus blurred images which have different blur parameters are used as the training samples to train the convolutional neural network, so that it can recognize the Fourier spectrum corresponding to the degraded image with motion blur or defocus blur. 
The experimental results show that the trained convolutional neural network can accurately recognize the degraded images of the two blur types with an accuracy of 99%, which lays the foundation for subsequent image restoration.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A method of automatic blur identification for the degraded images based on convolutional neural network is presented. There are significant differences in the Fourier spectra of degraded images with different blur types. According to this, the Fourier spectra of a large number of motion blurred images and defocus blurred images which have different blur parameters are used as the training samples to train the convolutional neural network, so that it can recognize the Fourier spectrum corresponding to the degraded image with motion blur or defocus blur. The experimental results show that the trained convolutional neural network can accurately recognize the degraded images of the two blur types with an accuracy of 99%, which lays the foundation for subsequent image restoration.", "fno": "212800a063", "keywords": [ "Convolutional Neural Nets", "Image Restoration", "Convolution", "Kernel", "Feature Extraction", "Image Recognition", "Training", "Convolutional Neural Network", "Blur Identification", "Motion Blur", "Defocus Blur" ], "authors": [ { "affiliation": "Beijing Institute of Technology", "fullName": "Yilin Huang", "givenName": "Yilin", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Institute of Technology", "fullName": "Fei Gao", "givenName": "Fei", "surname": "Gao", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccia", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-06-01T00:00:00", "pubType": "proceedings", "pages": "63-67", "year": "2019", "issn": null, "isbn": "978-1-7281-2128-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "212800z003", "articleId": "1f8MEslCgBG", "__typename": "AdjacentArticleType" }, "next": { "fno": "212800a068", "articleId": "1f8MEUv1Jqo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/1994/5825/0/00323899", "title": "Focused image recovery from two defocused images recorded with different camera settings", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1994/00323899/12OmNB8CiYm", "parentPublication": { "id": "proceedings/cvpr/1994/5825/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118c965", "title": "Discriminative Blur Detection Features", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118c965/12OmNBDyA7e", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775692", "title": "Motion Blur Identification Using Image derivative", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775692/12OmNrkjVip", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. 
ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032f381", "title": "Estimating Defocus Blur via Rank of Local Patches", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032f381/12OmNvEQseS", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/2/4252b714", "title": "Unified Restoration Method for Different Degraded Images", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252b714/12OmNyKa63B", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2004/8689/0/01433747", "title": "Finding point spread function of motion blur using Radon transform and modeling the motion length", "doi": null, "abstractUrl": "/proceedings-article/isspit/2004/01433747/12OmNyNQSB3", "parentPublication": { "id": "proceedings/isspit/2004/8689/0", "title": "Proceedings of the Fourth IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1988/9999/0/00196762", "title": "Restoration of degraded images using Markov random fields", "doi": null, "abstractUrl": "/proceedings-article/icassp/1988/00196762/12OmNybfqXb", "parentPublication": { "id": "proceedings/icassp/1988/9999/0", "title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etcs/2009/3557/3/3557e298", "title": "A New Method for Blur Identification of Out-of-Focus Images", "doi": null, "abstractUrl": "/proceedings-article/etcs/2009/3557e298/12OmNzTH119", 
"parentPublication": { "id": "proceedings/etcs/2009/3557/3", "title": "Education Technology and Computer Science, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2005/9313/0/01577212", "title": "Motion blur identification in noisy images using fuzzy sets", "doi": null, "abstractUrl": "/proceedings-article/isspit/2005/01577212/12OmNzsrwhD", "parentPublication": { "id": "proceedings/isspit/2005/9313/0", "title": "2005 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000d080", "title": "Defocus Blur Detection via Multi-stream Bottom-Top-Bottom Fully Convolutional Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000d080/17D45WUj90B", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeMmuCia9q", "doi": "10.1109/CVPR46437.2021.00175", "title": "Improved Handling of Motion Blur in Online Object Detection", "normalizedTitle": "Improved Handling of Motion Blur in Online Object Detection", "abstract": "We wish to detect specific categories of objects, for on-line vision systems that will run in the real world. Object detection is already very challenging. It is even harder when the images are blurred, from the camera being in a car or a hand-held phone. Most existing efforts either focused on sharp images, with easy to label ground truth, or they have treated motion blur as one of many generic corruptions.Instead, we focus especially on the details of egomotion induced blur. We explore five classes of remedies, where each targets different potential causes for the performance gap between sharp and blurred images. For example, first deblurring an image changes its human interpretability, but at present, only partly improves object detection. The other four classes of remedies address multi-scale texture, out-of-distribution testing, label generation, and conditioning by blur-type. Surprisingly, we discover that custom label generation aimed at resolving spatial ambiguity, ahead of all others, markedly improves object detection. 
Also, in contrast to findings from classification, we see a noteworthy boost by conditioning our model on bespoke categories of motion blur.We validate and cross-breed the different remedies experimentally on blurred COCO images and real-world blur datasets, producing an easy and practical favorite model with superior detection rates.", "abstracts": [ { "abstractType": "Regular", "content": "We wish to detect specific categories of objects, for on-line vision systems that will run in the real world. Object detection is already very challenging. It is even harder when the images are blurred, from the camera being in a car or a hand-held phone. Most existing efforts either focused on sharp images, with easy to label ground truth, or they have treated motion blur as one of many generic corruptions.Instead, we focus especially on the details of egomotion induced blur. We explore five classes of remedies, where each targets different potential causes for the performance gap between sharp and blurred images. For example, first deblurring an image changes its human interpretability, but at present, only partly improves object detection. The other four classes of remedies address multi-scale texture, out-of-distribution testing, label generation, and conditioning by blur-type. Surprisingly, we discover that custom label generation aimed at resolving spatial ambiguity, ahead of all others, markedly improves object detection. Also, in contrast to findings from classification, we see a noteworthy boost by conditioning our model on bespoke categories of motion blur.We validate and cross-breed the different remedies experimentally on blurred COCO images and real-world blur datasets, producing an easy and practical favorite model with superior detection rates.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We wish to detect specific categories of objects, for on-line vision systems that will run in the real world. Object detection is already very challenging. 
It is even harder when the images are blurred, from the camera being in a car or a hand-held phone. Most existing efforts either focused on sharp images, with easy to label ground truth, or they have treated motion blur as one of many generic corruptions.Instead, we focus especially on the details of egomotion induced blur. We explore five classes of remedies, where each targets different potential causes for the performance gap between sharp and blurred images. For example, first deblurring an image changes its human interpretability, but at present, only partly improves object detection. The other four classes of remedies address multi-scale texture, out-of-distribution testing, label generation, and conditioning by blur-type. Surprisingly, we discover that custom label generation aimed at resolving spatial ambiguity, ahead of all others, markedly improves object detection. Also, in contrast to findings from classification, we see a noteworthy boost by conditioning our model on bespoke categories of motion blur.We validate and cross-breed the different remedies experimentally on blurred COCO images and real-world blur datasets, producing an easy and practical favorite model with superior detection rates.", "fno": "450900b706", "keywords": [ "Computer Vision", "Image Motion Analysis", "Image Texture", "Object Detection", "Real World Blur Datasets", "Superior Detection Rates", "Motion Blur", "Online Object Detection", "On Line Vision Systems", "Hand Held Phone", "Sharp Images", "Egomotion Induced Blur", "Blurred Images", "Blur Type", "Custom Label Generation", "Blurred COCO Images", "Computer Vision", "Computational Modeling", "Machine Vision", "Object Detection", "Cameras", "Pattern Recognition", "Automobiles" ], "authors": [ { "affiliation": "University College London", "fullName": "Mohamed Sayed", "givenName": "Mohamed", "surname": "Sayed", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London", "fullName": "Gabriel Brostow", 
"givenName": "Gabriel", "surname": "Brostow", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "1706-1716", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "450900b695", "articleId": "1yeL9J0Plpm", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900b717", "articleId": "1yeLH9OT6KY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2009/5390/0/05336480", "title": "ESM-Blur: Handling & rendering blur in 3D tracking and augmentation", "doi": null, "abstractUrl": "/proceedings-article/ismar/2009/05336480/12OmNBLdKMB", "parentPublication": { "id": "proceedings/ismar/2009/5390/0", "title": "2009 8th IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587582", "title": "Motion from blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587582/12OmNBU1jFx", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mines/2011/4559/0/4559a041", "title": "Effective Pretreatment in Identification of Motion-Blur Direction", "doi": null, "abstractUrl": "/proceedings-article/mines/2011/4559a041/12OmNwCsdP2", "parentPublication": { "id": "proceedings/mines/2011/4559/0", "title": "Multimedia Information Networking and Security, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2017/0457/0/0457d806", "title": "From Motion Blur to Motion Flow: A Deep Learning Solution for Removing Heterogeneous Motion Blur", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d806/12OmNxTVU29", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a818", "title": "Improved Blur Insensitivity for Decorrelated Local Phase Quantization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a818/12OmNyrIaKr", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2015/6964/0/07299159", "title": "Handling motion blur in multi-frame super-resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07299159/12OmNzAoi1R", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2003/1946/0/19460276", "title": "Cartoon Blur: Non-Photorealistic Motion Blur", "doi": null, "abstractUrl": "/proceedings-article/cgi/2003/19460276/12OmNzC5SOT", "parentPublication": { "id": "proceedings/cgi/2003/1946/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/09/06025351", "title": "Handling Motion-Blur in 3D Tracking and Rendering for Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2012/09/06025351/13rRUxAAT0Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09151053", "title": "Photosequencing of Motion Blur using Short and Long Exposures", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09151053/1lPHxR02Q0w", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900g929", "title": "Self-generated Defocus Blur Detection via Dual Adversarial Discriminators", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900g929/1yeIAxMLqG4", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy6qfNZ", "title": "Computer Modeling and Simulation, International Conference on", "acronym": "iccms", "groupId": "1002645", "volume": "1", "displayVolume": "1", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNwOnn3h", "doi": "10.1109/ICCMS.2010.215", "title": "Feature Based Visualization Algorithm for Large-Scale Flow Data", "normalizedTitle": "Feature Based Visualization Algorithm for Large-Scale Flow Data", "abstract": "To analyze large amounts of numerical data, one of the most useful approaches is to use scientific visualization to transform them into graphical images. Flow visualization as one of the challenging topics has played important roles in oceanic data analysis. There are many techniques have been presented in the past decade, but most of them can't get high performance to visualize large-scale flow data in real time. To deduce the computational complexity brought by large flow dataset, feature-based expression will be a helpful way. However, how to get the result images quickly without costing much time for feature extraction and analysis is a very important problem to deal. Based on the common characteristic of flow and the unchangeable scale feather of spiral line, we present a new distributing strategy which needn't locate feature points very accurately and didn't rely on the type of feature fields. The visualization procedure not only can straight forward automatically but also can be changed with user's interactive command. The flow data obtained from the South Sea of China was verified and simulated. 
The result shows that this method using spiral strategy not templates to setting the seeds to emphasize the interesting fields is much faster and flexible, especially in large-scale flow data visualization.", "abstracts": [ { "abstractType": "Regular", "content": "To analyze large amounts of numerical data, one of the most useful approaches is to use scientific visualization to transform them into graphical images. Flow visualization as one of the challenging topics has played important roles in oceanic data analysis. There are many techniques have been presented in the past decade, but most of them can't get high performance to visualize large-scale flow data in real time. To deduce the computational complexity brought by large flow dataset, feature-based expression will be a helpful way. However, how to get the result images quickly without costing much time for feature extraction and analysis is a very important problem to deal. Based on the common characteristic of flow and the unchangeable scale feather of spiral line, we present a new distributing strategy which needn't locate feature points very accurately and didn't rely on the type of feature fields. The visualization procedure not only can straight forward automatically but also can be changed with user's interactive command. The flow data obtained from the South Sea of China was verified and simulated. The result shows that this method using spiral strategy not templates to setting the seeds to emphasize the interesting fields is much faster and flexible, especially in large-scale flow data visualization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To analyze large amounts of numerical data, one of the most useful approaches is to use scientific visualization to transform them into graphical images. Flow visualization as one of the challenging topics has played important roles in oceanic data analysis. 
There are many techniques have been presented in the past decade, but most of them can't get high performance to visualize large-scale flow data in real time. To deduce the computational complexity brought by large flow dataset, feature-based expression will be a helpful way. However, how to get the result images quickly without costing much time for feature extraction and analysis is a very important problem to deal. Based on the common characteristic of flow and the unchangeable scale feather of spiral line, we present a new distributing strategy which needn't locate feature points very accurately and didn't rely on the type of feature fields. The visualization procedure not only can straight forward automatically but also can be changed with user's interactive command. The flow data obtained from the South Sea of China was verified and simulated. The result shows that this method using spiral strategy not templates to setting the seeds to emphasize the interesting fields is much faster and flexible, especially in large-scale flow data visualization.", "fno": "3941a194", "keywords": [ "Computational Fluid Dynamics", "Data Visualisation", "Feature Extraction", "Flow Simulation", "Flow Visualisation", "Feature Based Visualization Algorithm", "Large Scale Flow Data", "Scientific Visualization", "Graphical Images", "Flow Visualization", "Oceanic Data Analysis", "Computational Complexity", "Feature Extraction", "Feature Analysis", "Data Visualization", "Large Scale Systems", "Oceans", "Spirals", "Computational Efficiency", "Image Analysis", "Feature Extraction", "Data Flow Computing", "Computational Modeling", "Analytical Models", "Seeds Distribution", "Streamline", "Feature Field", "Spiral Line", "Appending Grid Controller" ], "authors": [ { "affiliation": "Inst. of Remote Sensing Applic., Eng. Center, Chinese Acad. 
of Sci., Beijing, China", "fullName": "Liang Zhong", "givenName": "Liang", "surname": "Zhong", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Remote Sensing Applic., Eng. Center, Chinese Acad. of Sci., Beijing, China", "fullName": "ChiTian He", "givenName": "ChiTian", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Remote Sensing Applic., Eng. Center, Chinese Acad. of Sci., Beijing, China", "fullName": "Xin Zhang", "givenName": "Xin", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccms", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-01-01T00:00:00", "pubType": "proceedings", "pages": "194-197", "year": "2010", "issn": null, "isbn": "978-1-4244-5642-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3941a121", "articleId": "12OmNrMZpCm", "__typename": "AdjacentArticleType" }, "next": { "fno": "3941a125", "articleId": "12OmNCzsKFp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sc/2005/2758/0/27580006", "title": "Intelligent Feature Extraction and Tracking for Visualizing Large-Scale 4D Flow Simulations", "doi": null, "abstractUrl": "/proceedings-article/sc/2005/27580006/12OmNAXxX19", "parentPublication": { "id": "proceedings/sc/2005/2758/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2016/8942/0/8942a009", "title": "Feature Extraction and Visualization for Symbolic People Flow Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2016/8942a009/12OmNAsk4Fh", "parentPublication": { "id": "proceedings/iv/2016/8942/0", "title": "2016 20th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ieee-vis/1995/7187/0/71870379", "title": "Flow visualization in a hypersonic fin/ramp flow", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870379/12OmNqBtj4A", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031c471", "title": "Three-Dimensional Flow Field in a Large-scale Gas Control Valve", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031c471/12OmNvzJGc7", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2014/7981/0/7981a072", "title": "Semi-active Spiral Flow Channel Magnetorheological Damper", "doi": null, "abstractUrl": "/proceedings-article/cse/2014/7981a072/12OmNxw5ByD", "parentPublication": { "id": "proceedings/cse/2014/7981/0", "title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1993/3940/0/00398848", "title": "Visualization of time-dependent flow fields", "doi": null, "abstractUrl": "/proceedings-article/visual/1993/00398848/12OmNzkuKGS", "parentPublication": { "id": "proceedings/visual/1993/3940/0", "title": "Proceedings Visualization '93", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/05/07117453", "title": "A Vocabulary Approach to Partial Streamline Matching and Exploratory Flow Visualization", "doi": null, "abstractUrl": "/journal/tg/2016/05/07117453/13rRUEgs2C0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2008/06/04658158", "title": "Interactive Visualization and Analysis of Transitional Flow", "doi": null, "abstractUrl": "/journal/tg/2008/06/04658158/13rRUNvyaeU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2013/06/mcs2013060096", "title": "Texture-Based Flow Visualization", "doi": null, "abstractUrl": "/magazine/cs/2013/06/mcs2013060096/13rRUwh80yj", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122063", "title": "Straightening Tubular Flow for Side-by-Side Visualization", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122063/13rRUy2YLSY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy5hRd7", "title": "2009 International Forum on Information Technology and Applications (IFITA)", "acronym": "ifita", "groupId": "1002862", "volume": "2", "displayVolume": "2", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNyqzLUP", "doi": "10.1109/IFITA.2009.35", "title": "Numerical Simulation and Visualization of Thermal and Flow Fields of MOCVD", "normalizedTitle": "Numerical Simulation and Visualization of Thermal and Flow Fields of MOCVD", "abstract": "Method of Computational Fluid Dynamics (CFD) was applied to numerical simulation of gases' thermal and flow fields of Metal Organic Chemical Vapor Deposition (MOCVD) reactor which grows high-efficiency three-junction GaInP/GaAs/Ge tandem solar cells. Virtual Reality (VR) technology was applied to visualization of numerical simulation of gas's thermal and flow fields of MOCVD reactor. The results of numerical simulation provide optimization of processing parameters in MOCVD reactor under a certain conditions, providing rational suggestion for optimization design in size of the substrate in the reactor. The results of visualization truly and intuitively display distributing situation of gas's temperature field and velocity field in MOCVD reactor, providing further optimizations of processing parameters of GaInP thin film grown by MOCVD with theoretical basis.", "abstracts": [ { "abstractType": "Regular", "content": "Method of Computational Fluid Dynamics (CFD) was applied to numerical simulation of gases' thermal and flow fields of Metal Organic Chemical Vapor Deposition (MOCVD) reactor which grows high-efficiency three-junction GaInP/GaAs/Ge tandem solar cells. Virtual Reality (VR) technology was applied to visualization of numerical simulation of gas's thermal and flow fields of MOCVD reactor. 
The results of numerical simulation provide optimization of processing parameters in MOCVD reactor under a certain conditions, providing rational suggestion for optimization design in size of the substrate in the reactor. The results of visualization truly and intuitively display distributing situation of gas's temperature field and velocity field in MOCVD reactor, providing further optimizations of processing parameters of GaInP thin film grown by MOCVD with theoretical basis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Method of Computational Fluid Dynamics (CFD) was applied to numerical simulation of gases' thermal and flow fields of Metal Organic Chemical Vapor Deposition (MOCVD) reactor which grows high-efficiency three-junction GaInP/GaAs/Ge tandem solar cells. Virtual Reality (VR) technology was applied to visualization of numerical simulation of gas's thermal and flow fields of MOCVD reactor. The results of numerical simulation provide optimization of processing parameters in MOCVD reactor under a certain conditions, providing rational suggestion for optimization design in size of the substrate in the reactor. 
The results of visualization truly and intuitively display distributing situation of gas's temperature field and velocity field in MOCVD reactor, providing further optimizations of processing parameters of GaInP thin film grown by MOCVD with theoretical basis.", "fno": "3600b749", "keywords": [ "Computational Fluid Dynamics", "Metal Organic Chemical Vapor Deposition", "Virtual Reality Technology", "Visualization", "Optimization", "Processing Parameters" ], "authors": [ { "affiliation": null, "fullName": "Yu Tao", "givenName": "Yu", "surname": "Tao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hu Guihua", "givenName": "Hu", "surname": "Guihua", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhu Wenhua", "givenName": "Zhu", "surname": "Wenhua", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hu Xiaomei", "givenName": "Hu", "surname": "Xiaomei", "__typename": "ArticleAuthorType" } ], "idPrefix": "ifita", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-05-01T00:00:00", "pubType": "proceedings", "pages": "749-754", "year": "2009", "issn": null, "isbn": "978-0-7695-3600-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3600b745", "articleId": "12OmNvxbhI4", "__typename": "AdjacentArticleType" }, "next": { "fno": "3600b755", "articleId": "12OmNzt0IEu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icecs/2009/3937/0/3937a403", "title": "Simulation of Convection Heat Transfer in Thermal Flow Reversal Reactor for Lean Methane Oxidation", "doi": null, "abstractUrl": "/proceedings-article/icecs/2009/3937a403/12OmNBSSVb8", "parentPublication": { "id": "proceedings/icecs/2009/3937/0", "title": "Environmental and Computer Science, International Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2010/4286/2/4286b648", "title": "The Design and Tests in a Three Interconnected Fluidized Bed", "doi": null, "abstractUrl": "/proceedings-article/icdma/2010/4286b648/12OmNCdk2HG", "parentPublication": { "id": "proceedings/icdma/2010/4286/2", "title": "2010 International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eurdac/1993/4350/0/00410652", "title": "CAD: The numerical and analytical methods combined for the analysis of IC's thermal fields", "doi": null, "abstractUrl": "/proceedings-article/eurdac/1993/00410652/12OmNrF2DLV", "parentPublication": { "id": "proceedings/eurdac/1993/4350/0", "title": "Proceedings of EURO-DAC 93 and EURO-VHDL 93- European Design Automation Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/adhes/1998/4934/0/00742057", "title": "Numerical analysis of adhesive shrinkage due to thermal cycling", "doi": null, "abstractUrl": "/proceedings-article/adhes/1998/00742057/12OmNvAAtmD", "parentPublication": { "id": "proceedings/adhes/1998/4934/0", "title": "Proceedings of 3rd International Conference on Adhesive Joining and Coating Technology in Electronics Manufacturing 1998. 
Presented at Adhesives '98", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cdciem/2011/4350/0/4350a496", "title": "Numerical Investigation on Thermal Flow-Reversal Oxidation of Dilute Methane", "doi": null, "abstractUrl": "/proceedings-article/cdciem/2011/4350a496/12OmNxA3Z9B", "parentPublication": { "id": "proceedings/cdciem/2011/4350/0", "title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999964", "title": "Equivalent Thermal Conductivity Model Based Full Scale Numerical Simulation for Thermal Management in Fan-Out Packages", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999964/12OmNxWLTpp", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/2/3583b775", "title": "Delamination in Thermohyperelastic Plastic IC Packaging Material Due to Thermal Load and Moisture", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b775/12OmNyQGS6c", "parentPublication": { "id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1993/3940/0/00398848", "title": "Visualization of time-dependent flow fields", "doi": null, "abstractUrl": "/proceedings-article/visual/1993/00398848/12OmNzkuKGS", "parentPublication": { "id": "proceedings/visual/1993/3940/0", "title": "Proceedings Visualization '93", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsul/2009/3976/0/3976a055", "title": "Numerical 
Simulation of Water Circulation in a Cylindrical Horizontal Thermal Tank", "doi": null, "abstractUrl": "/proceedings-article/mcsul/2009/3976a055/12OmNzvQI02", "parentPublication": { "id": "proceedings/mcsul/2009/3976/0", "title": "Computational Modeling, Southern Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/09/06060947", "title": "Efficient Computation of Combinatorial Feature Flow Fields", "doi": null, "abstractUrl": "/journal/tg/2012/09/06060947/13rRUILtJqP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyQ7FQU", "title": "Proceedings Visualization '93", "acronym": "visual", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1993", "__typename": "ProceedingType" }, "article": { "id": "12OmNzkuKGS", "doi": "10.1109/VISUAL.1993.398848", "title": "Visualization of time-dependent flow fields", "normalizedTitle": "Visualization of time-dependent flow fields", "abstract": "Presently, there are very few visualization systems available for time-dependent flow fields. Although existing visualization systems for instantaneous flow fields may be used to view time-dependent flow fields at discrete points in time, the time variable is usually not considered in the visualization technique. We present a simple and effective approach for visualizing time-dependent flow fields using streaklines. A system was developed to demonstrate this approach. The system can process many time frames of flow fields without requiring that all the data be in memory simultaneously, and it also handles flow fields with moving grids. We have used the system to visualize streaklines from several large 3-D time-dependent flow fields with moving grids. The system was able to provide useful insights to the physical phenomena in the flow fields.<>", "abstracts": [ { "abstractType": "Regular", "content": "Presently, there are very few visualization systems available for time-dependent flow fields. Although existing visualization systems for instantaneous flow fields may be used to view time-dependent flow fields at discrete points in time, the time variable is usually not considered in the visualization technique. We present a simple and effective approach for visualizing time-dependent flow fields using streaklines. A system was developed to demonstrate this approach. The system can process many time frames of flow fields without requiring that all the data be in memory simultaneously, and it also handles flow fields with moving grids. 
We have used the system to visualize streaklines from several large 3-D time-dependent flow fields with moving grids. The system was able to provide useful insights to the physical phenomena in the flow fields.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presently, there are very few visualization systems available for time-dependent flow fields. Although existing visualization systems for instantaneous flow fields may be used to view time-dependent flow fields at discrete points in time, the time variable is usually not considered in the visualization technique. We present a simple and effective approach for visualizing time-dependent flow fields using streaklines. A system was developed to demonstrate this approach. The system can process many time frames of flow fields without requiring that all the data be in memory simultaneously, and it also handles flow fields with moving grids. We have used the system to visualize streaklines from several large 3-D time-dependent flow fields with moving grids. The system was able to provide useful insights to the physical phenomena in the flow fields.", "fno": "00398848", "keywords": [ "Flow Visualisation", "Physics Computing", "Data Visualisation", "3 D Flow Fields", "Time Dependent Flow Fields", "Visualization Systems", "Instantaneous Flow Fields", "Streaklines", "Time Frames", "Moving Grids", "Physical Phenomena", "Data Visualization", "Aerodynamics", "NASA", "Data Flow Computing", "Fluid Dynamics", "Computational Fluid Dynamics", "Isosurfaces", "Computational Modeling", "Numerical Models" ], "authors": [ { "affiliation": "NASA Ames Res. Center, Moffett Field, CA, USA", "fullName": "D.A. 
Lane", "givenName": "D.A.", "surname": "Lane", "__typename": "ArticleAuthorType" } ], "idPrefix": "visual", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1993-01-01T00:00:00", "pubType": "proceedings", "pages": "32,33,34,35,36,37,38", "year": "1993", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00398847", "articleId": "12OmNCbU37i", "__typename": "AdjacentArticleType" }, "next": { "fno": "00398849", "articleId": "12OmNscxj73", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/visual/1992/2897/0/00235227", "title": "Virtual Smoke: an interactive 3D flow visualization technique", "doi": null, "abstractUrl": "/proceedings-article/visual/1992/00235227/12OmNAXxXic", "parentPublication": { "id": "proceedings/visual/1992/2897/0", "title": "Proceedings Visualization '92", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346311", "title": "UFAT-a particle tracer for time-dependent flow fields", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346311/12OmNqHqSpB", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300018", "title": "Image Space Based Visualization of Unsteady Flow on Surfaces", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300018/12OmNxH9Xhw", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2010/6685/0/05429599", "title": "Physically-based interactive schlieren flow 
visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2010/05429599/12OmNyLiuAd", "parentPublication": { "id": "proceedings/pacificvis/2010/6685/0", "title": "2010 IEEE Pacific Visualization Symposium (PacificVis 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300015", "title": "A Texture-Based Framework for Spacetime-Coherent Visualization of Time-Dependent Vector Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300015/12OmNyv7mgw", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532848", "title": "Eyelet particle tracing - steady visualization of unsteady flow", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532848/12OmNzA6GSA", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532853", "title": "Texture-based visualization of uncertainty in flow fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532853/12OmNzXWZGL", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532851", "title": "Extraction of parallel vector surfaces in 3D time-dependent fields and application to vortex core line tracking", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532851/12OmNzcPAD3", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2004/06/v0637", "title": "ISA and IBFVS: Image Space-Based Visualization of Flow on Surfaces", "doi": null, "abstractUrl": "/journal/tg/2004/06/v0637/13rRUwInuWn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061404", "title": "Generation of Accurate Integral Surfaces in Time-Dependent Vector Fields", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061404/13rRUwjXZS7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1AqwYO1eX72", "title": "2021 IEEE International Conference on Data Mining (ICDM)", "acronym": "icdm", "groupId": "1000179", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1Aqx9wIYoZW", "doi": "10.1109/ICDM51629.2021.00152", "title": "PhyFlow: Physics-Guided Deep Learning for Generating Interpretable 3D Flow Fields", "normalizedTitle": "PhyFlow: Physics-Guided Deep Learning for Generating Interpretable 3D Flow Fields", "abstract": "Generating flow fields (such as pressure and velocity fields) in 3D space is a fundamental task in computational fluid dynamics (CFD), with applications across a vast spectrum of science and engineering problems. An important class of fluid flow problems in CFD is multi-phase flow, where dispersed solid particles are present in the fluid flow. Despite recent developments in deep learning (DL) for CFD applications, current state-of-the-art is still unable to model 3D flow fields, especially in multi-phase flow settings. It is with this goal that we introduce PhyFlow, a novel physics-guided deep learning architecture for modeling 3D multi-phase fluid flows, designed to mimic the popular projection method for solving fluid flows in CFD simulations. We demonstrate that PhyFlow generates high quality flow fields and yields a 49.61&#x0025; improvement over other state-of-the-art baselines. We also test the quality of PhyFlow based fields by employing them in downstream tasks like particle drag force prediction and demonstrate state-of-the-art results, improving upon the previous best models by 9.89&#x0025;. Finally, we demonstrate the consistency of PhyFlow predictions with known underlying physics governing equations. 
Our source code and data are available online <sup>&#x002A;</sup>.<sup>&#x002A;</sup>tinyurl.com/mjkcrsdw", "abstracts": [ { "abstractType": "Regular", "content": "Generating flow fields (such as pressure and velocity fields) in 3D space is a fundamental task in computational fluid dynamics (CFD), with applications across a vast spectrum of science and engineering problems. An important class of fluid flow problems in CFD is multi-phase flow, where dispersed solid particles are present in the fluid flow. Despite recent developments in deep learning (DL) for CFD applications, current state-of-the-art is still unable to model 3D flow fields, especially in multi-phase flow settings. It is with this goal that we introduce PhyFlow, a novel physics-guided deep learning architecture for modeling 3D multi-phase fluid flows, designed to mimic the popular projection method for solving fluid flows in CFD simulations. We demonstrate that PhyFlow generates high quality flow fields and yields a 49.61&#x0025; improvement over other state-of-the-art baselines. We also test the quality of PhyFlow based fields by employing them in downstream tasks like particle drag force prediction and demonstrate state-of-the-art results, improving upon the previous best models by 9.89&#x0025;. Finally, we demonstrate the consistency of PhyFlow predictions with known underlying physics governing equations. Our source code and data are available online <sup>&#x002A;</sup>.<sup>&#x002A;</sup>tinyurl.com/mjkcrsdw", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Generating flow fields (such as pressure and velocity fields) in 3D space is a fundamental task in computational fluid dynamics (CFD), with applications across a vast spectrum of science and engineering problems. An important class of fluid flow problems in CFD is multi-phase flow, where dispersed solid particles are present in the fluid flow. 
Despite recent developments in deep learning (DL) for CFD applications, current state-of-the-art is still unable to model 3D flow fields, especially in multi-phase flow settings. It is with this goal that we introduce PhyFlow, a novel physics-guided deep learning architecture for modeling 3D multi-phase fluid flows, designed to mimic the popular projection method for solving fluid flows in CFD simulations. We demonstrate that PhyFlow generates high quality flow fields and yields a 49.61% improvement over other state-of-the-art baselines. We also test the quality of PhyFlow based fields by employing them in downstream tasks like particle drag force prediction and demonstrate state-of-the-art results, improving upon the previous best models by 9.89%. Finally, we demonstrate the consistency of PhyFlow predictions with known underlying physics governing equations. Our source code and data are available online *.*tinyurl.com/mjkcrsdw", "fno": "239800b246", "keywords": [ "Computational Fluid Dynamics", "Drag", "Flow Simulation", "Two Phase Flow", "Generating Interpretable 3 D Flow Fields", "Velocity Fields", "Computational Fluid Dynamics", "Engineering Problems", "Fluid Flow Problems", "Solid Particles", "CFD Applications", "3 D Flow Fields", "Multiphase Flow Settings", "3 D Multiphase Fluid", "CFD Simulations", "Phy Flow Based Fields", "Particle Drag Force Prediction", "Phy Flow Predictions", "Physics Guided Deep Learning Architecture", "Deep Learning", "Solid Modeling", "Three Dimensional Displays", "Computational Fluid Dynamics", "Force", "Fluid Flow", "Predictive Models", "Physics Guided ML", "Deep Learning", "CFD" ], "authors": [ { "affiliation": "Virginia Tech,Department of Computer Science", "fullName": "Nikhil Muralidhar", "givenName": "Nikhil", "surname": "Muralidhar", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Department of Computer Science", "fullName": "Jie Bu", "givenName": "Jie", "surname": "Bu", "__typename": "ArticleAuthorType" 
}, { "affiliation": "Virginia Tech,Department of Mechanical Engineering", "fullName": "Ze Cao", "givenName": "Ze", "surname": "Cao", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Department of Mechanical Engineering", "fullName": "Neil Raj", "givenName": "Neil", "surname": "Raj", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Department of Computer Science", "fullName": "Naren Ramakrishnan", "givenName": "Naren", "surname": "Ramakrishnan", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Department of Mechanical Engineering", "fullName": "Danesh Tafti", "givenName": "Danesh", "surname": "Tafti", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech,Department of Computer Science", "fullName": "Anuj Karpatne", "givenName": "Anuj", "surname": "Karpatne", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-12-01T00:00:00", "pubType": "proceedings", "pages": "1246-1251", "year": "2021", "issn": null, "isbn": "978-1-6654-2398-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "239800b240", "articleId": "1AqxaHHS1uo", "__typename": "AdjacentArticleType" }, "next": { "fno": "239800b252", "articleId": "1Aqx3r6eekw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/med/2006/1/0/04124896", "title": "Control Oriented Models & Feedback Design in Fluid Flow Systems: A Review", "doi": null, "abstractUrl": "/proceedings-article/med/2006/04124896/12OmNA0MZ8H", "parentPublication": { "id": "proceedings/med/2006/1/0", "title": "Proceedings of the 14th Mediterranean Conference on Control and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2016/3834/0/3834a342", "title": "A 
Computational Approach for Blood Flow Analysis in the Densely Coiled Cerebral Aneurysm", "doi": null, "abstractUrl": "/proceedings-article/bibe/2016/3834a342/12OmNC8MsAG", "parentPublication": { "id": "proceedings/bibe/2016/3834/0", "title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346311", "title": "UFAT-a particle tracer for time-dependent flow fields", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346311/12OmNqHqSpB", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2011/4335/0/4335a081", "title": "Resistance Calculations of Trimaran Hull Form Using Computational Fluid Dynamics", "doi": null, "abstractUrl": "/proceedings-article/cso/2011/4335a081/12OmNvDqsAO", "parentPublication": { "id": "proceedings/cso/2011/4335/0", "title": "2011 Fourth International Joint Conference on Computational Sciences and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2009/3634/4/3634d358", "title": "Study on Numerical Simulation of Single-Phase Injection Device Flow Flied", "doi": null, "abstractUrl": "/proceedings-article/icic/2009/3634d358/12OmNy4IEY3", "parentPublication": { "id": "proceedings/icic/2009/3634/4", "title": "2009 Second International Conference on Information and Computing Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2010/6685/0/05429599", "title": "Physically-based interactive schlieren flow visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2010/05429599/12OmNyLiuAd", "parentPublication": { "id": "proceedings/pacificvis/2010/6685/0", "title": "2010 IEEE 
Pacific Visualization Symposium (PacificVis 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifita/2009/3600/2/3600b749", "title": "Numerical Simulation and Visualization of Thermal and Flow Fields of MOCVD", "doi": null, "abstractUrl": "/proceedings-article/ifita/2009/3600b749/12OmNyqzLUP", "parentPublication": { "id": "proceedings/ifita/2009/3600/2", "title": "2009 International Forum on Information Technology and Applications (IFITA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/med/2006/1/0/04124861", "title": "Control Law Design for Channel Flow - 2D Designs and 3D Performance Evaluation", "doi": null, "abstractUrl": "/proceedings-article/med/2006/04124861/12OmNzahbYZ", "parentPublication": { "id": "proceedings/med/2006/1/0", "title": "Proceedings of the 14th Mediterranean Conference on Control and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880051", "title": "Investigating Swirl and Tumble Flow with a Comparison of Visualization Techniques", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880051/12OmNzd7c2u", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1993/3940/0/00398848", "title": "Visualization of time-dependent flow fields", "doi": null, "abstractUrl": "/proceedings-article/visual/1993/00398848/12OmNzkuKGS", "parentPublication": { "id": "proceedings/visual/1993/3940/0", "title": "Proceedings Visualization '93", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzVGcJn", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "acronym": "fg", "groupId": "1000065", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNA0MZ3d", "doi": "10.1109/AFGR.2008.4813308", "title": "Spontaneous facial expression classification with facial motion vectors", "normalizedTitle": "Spontaneous facial expression classification with facial motion vectors", "abstract": "This paper proposes a novel spontaneous facial expression classification method using the facial motion magnification which transforms the subtle facial expressions into the corresponding exaggerated facial expressions. Facial motion magnification consists of four steps: First, we perform the active appearance model (AAM) fitting to extract 70 facial feature points in the face image sequence. Second, we align the face image sequence using the static three feature points. Third, we estimate the motion vectors of 27 feature points using the feature point tracking method. Finally, we obtain the exaggerated facial expressions by magnifying the motion vectors of the 27 feature points. After facial motion magnification, we recognize the exaggerated facial expressions using the support vector machines (SVM) to classify the facial expression features. Experimental results of the subtle facial expression recognition show promising results of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a novel spontaneous facial expression classification method using the facial motion magnification which transforms the subtle facial expressions into the corresponding exaggerated facial expressions. Facial motion magnification consists of four steps: First, we perform the active appearance model (AAM) fitting to extract 70 facial feature points in the face image sequence. 
Second, we align the face image sequence using the static three feature points. Third, we estimate the motion vectors of 27 feature points using the feature point tracking method. Finally, we obtain the exaggerated facial expressions by magnifying the motion vectors of the 27 feature points. After facial motion magnification, we recognize the exaggerated facial expressions using the support vector machines (SVM) to classify the facial expression features. Experimental results of the subtle facial expression recognition show promising results of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a novel spontaneous facial expression classification method using the facial motion magnification which transforms the subtle facial expressions into the corresponding exaggerated facial expressions. Facial motion magnification consists of four steps: First, we perform the active appearance model (AAM) fitting to extract 70 facial feature points in the face image sequence. Second, we align the face image sequence using the static three feature points. Third, we estimate the motion vectors of 27 feature points using the feature point tracking method. Finally, we obtain the exaggerated facial expressions by magnifying the motion vectors of the 27 feature points. After facial motion magnification, we recognize the exaggerated facial expressions using the support vector machines (SVM) to classify the facial expression features. 
Experimental results of the subtle facial expression recognition show promising results of the proposed method.", "fno": "04813308", "keywords": [ "Face Recognition", "Feature Extraction", "Image Classification", "Image Motion Analysis", "Image Sequences", "Support Vector Machines", "Spontaneous Facial Expression Classification", "Facial Motion Vector", "Facial Motion Magnification", "Subtle Facial Expression", "Exaggerated Facial Expression", "Active Appearance Model", "Feature Extraction", "Face Image Sequence", "Static Three Feature Points", "Feature Point Tracking", "Support Vector Machine", "Facial Expression Feature Classification", "Facial Expression Recognition", "Shape", "Face Recognition", "Active Appearance Model", "Facial Features", "Feature Extraction", "Image Sequences", "Motion Estimation", "Tracking", "Support Vector Machines", "Support Vector Machine Classification" ], "authors": [ { "affiliation": "Department of Computer Science and Engineering, POSTECH, San 31, Hyoja-Dong, Nam-Gu, Pohang, 790-784, Korea", "fullName": "Sungsoo Park", "givenName": "Sungsoo", "surname": "Park", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science and Engineering, POSTECH, San 31, Hyoja-Dong, Nam-Gu, Pohang, 790-784, Korea", "fullName": "Daijin Kim", "givenName": "Daijin", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-09-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": null, "isbn": "978-1-4244-2153-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04813306", "articleId": "12OmNwBT1rD", "__typename": "AdjacentArticleType" }, "next": { "fno": "04813309", "articleId": "12OmNxT56Bl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/aiccsa/2013/0792/0/06616505", "title": "Automatic Facial Expression Recognition System", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2013/06616505/12OmNBsue6K", "parentPublication": { "id": "proceedings/aiccsa/2013/0792/0", "title": "2013 ACS International Conference on Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2009/3866/0/3866a264", "title": "A Quadratic Deformation Model for Facial Expression Recognition", "doi": null, "abstractUrl": "/proceedings-article/dicta/2009/3866a264/12OmNC4wtDl", "parentPublication": { "id": "proceedings/dicta/2009/3866/0", "title": "2009 Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543261", "title": "Learning spatial weighting via quadratic programming for facial expression analysis", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543261/12OmNCm7BM1", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1995/7042/0/70420360", "title": "Facial expression recognition using a dynamic model and motion energy", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420360/12OmNrYlmBV", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543267", "title": "Facial expression recognition using Gabor motion energy filters", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543267/12OmNvvLi5z", "parentPublication": { "id": 
"proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813304", "title": "3D facial expression recognition based on properties of line segments connecting facial feature points", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813304/12OmNwnYG0Y", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761398", "title": "Facial expression analysis with facial expression deformation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761398/12OmNxRnvPb", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2013/5545/0/06553788", "title": "A high-resolution spontaneous 3D dynamic facial expression database", "doi": null, "abstractUrl": "/proceedings-article/fg/2013/06553788/12OmNy2Jt31", "parentPublication": { "id": "proceedings/fg/2013/5545/0", "title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2013/5545/0/06553775", "title": "Perceptual effects of damped and exaggerated facial motion in animated characters", "doi": null, "abstractUrl": "/proceedings-article/fg/2013/06553775/12OmNyTfg3l", "parentPublication": { "id": "proceedings/fg/2013/5545/0", "title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/ta/2020/02/08186192", "title": "Co-Clustering to Reveal Salient Facial Features for Expression Recognition", "doi": null, "abstractUrl": "/journal/ta/2020/02/08186192/13rRUNvgz8e", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwJPMXm", "title": "Communications and Mobile Computing, International Conference on", "acronym": "cmc", "groupId": "1002644", "volume": "3", "displayVolume": "3", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNA0vnT6", "doi": "10.1109/CMC.2010.151", "title": "3D Face Reconstruction from Single 2D Image Based on Robust Facial Feature Points Extraction and Generic Wire Frame Model", "normalizedTitle": "3D Face Reconstruction from Single 2D Image Based on Robust Facial Feature Points Extraction and Generic Wire Frame Model", "abstract": "An efficient 3D face reconstruction method for any single 2D facial image even partially damaged or hidden is proposed in this paper. Our purpose is to overcome the shortcomings of traditional methods such as providing at least couple of orthogonal facial images, needing complicated calculations, and so on. Especially, most of the existing methods can’t be used in real-time communication and mobile computing environment, e.g. video transmission systems. Firstly, some facial feature points were extracted by an improved active appearance models, we do some significant work on robust feature point localization when the facial images are not very well for implementation of the AAMs algorithm. Then we adjusted overall and local generic wire frame model using the feature points. Finally we constructed realistic facial texture. Experimental results show that our proposed 3D reconstruction method cost little time to model from a single image and is suitable for real-time and mobile applications.", "abstracts": [ { "abstractType": "Regular", "content": "An efficient 3D face reconstruction method for any single 2D facial image even partially damaged or hidden is proposed in this paper. Our purpose is to overcome the shortcomings of traditional methods such as providing at least couple of orthogonal facial images, needing complicated calculations, and so on. 
Especially, most of the existing methods can’t be used in real-time communication and mobile computing environment, e.g. video transmission systems. Firstly, some facial feature points were extracted by an improved active appearance models, we do some significant work on robust feature point localization when the facial images are not very well for implementation of the AAMs algorithm. Then we adjusted overall and local generic wire frame model using the feature points. Finally we constructed realistic facial texture. Experimental results show that our proposed 3D reconstruction method cost little time to model from a single image and is suitable for real-time and mobile applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An efficient 3D face reconstruction method for any single 2D facial image even partially damaged or hidden is proposed in this paper. Our purpose is to overcome the shortcomings of traditional methods such as providing at least couple of orthogonal facial images, needing complicated calculations, and so on. Especially, most of the existing methods can’t be used in real-time communication and mobile computing environment, e.g. video transmission systems. Firstly, some facial feature points were extracted by an improved active appearance models, we do some significant work on robust feature point localization when the facial images are not very well for implementation of the AAMs algorithm. Then we adjusted overall and local generic wire frame model using the feature points. Finally we constructed realistic facial texture. 
Experimental results show that our proposed 3D reconstruction method cost little time to model from a single image and is suitable for real-time and mobile applications.", "fno": "3989c396", "keywords": [ "Facial Feature Points Extraction", "Active Appearance Models", "3 D Wire Frame Model", "3 D Face Reconstruction" ], "authors": [ { "affiliation": null, "fullName": "Xiaojiu Fan", "givenName": "Xiaojiu", "surname": "Fan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qiang Peng", "givenName": "Qiang", "surname": "Peng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ming Zhong", "givenName": "Ming", "surname": "Zhong", "__typename": "ArticleAuthorType" } ], "idPrefix": "cmc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-04-01T00:00:00", "pubType": "proceedings", "pages": "396-400", "year": "2010", "issn": null, "isbn": "978-0-7695-3989-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3989c392", "articleId": "12OmNyqiaNB", "__typename": "AdjacentArticleType" }, "next": { "fno": "3989c401", "articleId": "12OmNqBKTXw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/kse/2011/4567/0/4567a120", "title": "3D Facial Reconstruction System from Skull for Vietnamese", "doi": null, "abstractUrl": "/proceedings-article/kse/2011/4567a120/12OmNAolGTt", "parentPublication": { "id": "proceedings/kse/2011/4567/0", "title": "Knowledge and Systems Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457448", "title": "Combining online and offline learning for tracking a talking face in video", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457448/12OmNBKmXgV", "parentPublication": { "id": 
"proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2009/3859/3/3859c274", "title": "Facial Feature Extraction on Fiducial Points and Used in Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/iita/2009/3859c274/12OmNBh8gUe", "parentPublication": { "id": "proceedings/iita/2009/3859/3", "title": "2009 Third International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109d963", "title": "Real-Time 3D Face and Facial Action Tracking Using Extended 2D+3D AAMs", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d963/12OmNroijel", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mvhi/2010/4009/0/4009a330", "title": "A Modified Feature Points Extraction Algorithm and it's Adaptability Evaluation", "doi": null, "abstractUrl": "/proceedings-article/mvhi/2010/4009a330/12OmNvqmUCF", "parentPublication": { "id": "proceedings/mvhi/2010/4009/0", "title": "Machine Vision and Human-machine Interface, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2010/4225/1/4225a219", "title": "Facial Expression Recognition from Image Sequences Based on Feature Points and Canonical Correlations", "doi": null, "abstractUrl": "/proceedings-article/aici/2010/4225a219/12OmNx5piYw", "parentPublication": { "id": "proceedings/aici/2010/4225/1", "title": "Artificial Intelligence and Computational Intelligence, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/1/81831468", "title": "A comparison of detailed automatic wire-frame fitting methods", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81831468/12OmNxXl5wt", "parentPublication": { "id": "proceedings/icip/1997/8183/1", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459283", "title": "Robust facial feature tracking using selected multi-resolution linear predictors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459283/12OmNyGtjje", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2011/4467/0/4467a244", "title": "Face It: 3D Facial Reconstruction from a Single 2D Image for Games and Simulations", "doi": null, "abstractUrl": "/proceedings-article/cw/2011/4467a244/12OmNyQphaA", "parentPublication": { "id": "proceedings/cw/2011/4467/0", "title": "2011 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349549", "title": "Perception of emotional expressions in different representations using facial feature points", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349549/12OmNzUgdes", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgX3", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNB0FxhZ", "doi": "10.1109/CVPRW.2009.5204260", "title": "Use of Active Appearance Models for analysis and synthesis of naturally occurring behavior", "normalizedTitle": "Use of Active Appearance Models for analysis and synthesis of naturally occurring behavior", "abstract": "Significant efforts have been made in the analysis and understanding of naturally occurring behavior. Active Appearance Models (AAM) are an especially exciting approach to this task for facial behavior. They may be used both to measure naturally occurring behavior and to synthesize photo-realistic real-time avatars with which to test hypotheses made possible by those measurements. We have used both of these capabilities, analysis and synthesis, to investigate the influence of depression on face-to-face interaction. With AAMs we have investigated large datasets of clinical interviews and successfully modeled and perturbed communicative behavior in a video conference paradigm to test causal hypotheses. These advances have lead to new understanding of the social functions of depression and dampened affect in dyadic interaction. Key challenges remain. These include automated detection and synthesis of subtle facial actions; hybrid methods that optimally integrate automated and manual processing; computational modeling of subjective states from multimodal input; and dynamic models of social and affective behavior.", "abstracts": [ { "abstractType": "Regular", "content": "Significant efforts have been made in the analysis and understanding of naturally occurring behavior. Active Appearance Models (AAM) are an especially exciting approach to this task for facial behavior. 
They may be used both to measure naturally occurring behavior and to synthesize photo-realistic real-time avatars with which to test hypotheses made possible by those measurements. We have used both of these capabilities, analysis and synthesis, to investigate the influence of depression on face-to-face interaction. With AAMs we have investigated large datasets of clinical interviews and successfully modeled and perturbed communicative behavior in a video conference paradigm to test causal hypotheses. These advances have lead to new understanding of the social functions of depression and dampened affect in dyadic interaction. Key challenges remain. These include automated detection and synthesis of subtle facial actions; hybrid methods that optimally integrate automated and manual processing; computational modeling of subjective states from multimodal input; and dynamic models of social and affective behavior.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Significant efforts have been made in the analysis and understanding of naturally occurring behavior. Active Appearance Models (AAM) are an especially exciting approach to this task for facial behavior. They may be used both to measure naturally occurring behavior and to synthesize photo-realistic real-time avatars with which to test hypotheses made possible by those measurements. We have used both of these capabilities, analysis and synthesis, to investigate the influence of depression on face-to-face interaction. With AAMs we have investigated large datasets of clinical interviews and successfully modeled and perturbed communicative behavior in a video conference paradigm to test causal hypotheses. These advances have lead to new understanding of the social functions of depression and dampened affect in dyadic interaction. Key challenges remain. 
These include automated detection and synthesis of subtle facial actions; hybrid methods that optimally integrate automated and manual processing; computational modeling of subjective states from multimodal input; and dynamic models of social and affective behavior.", "fno": "05204260", "keywords": [ "Avatars", "Face Recognition", "Gesture Recognition", "Active Appearance Models", "Naturally Occurring Behavior Analysis", "Naturally Occurring Behavior Synthesis", "Facial Behavior", "Photo Realistic Real Time Avatars", "Face To Face Interaction", "Video Conference Paradigm", "Social Functions", "Automated Detection", "Subtle Facial Actions", "Automated Processing", "Manual Processing", "Computational Modeling", "Active Appearance Model", "Avatars", "Acoustic Measurements", "Facial Animation", "Testing", "Face Detection", "Signal Analysis", "Signal Synthesis", "Associate Members", "Acoustic Signal Processing", "AAM", "Facial Expression", "Animation", "Depression" ], "authors": [ { "affiliation": "University of Pittsburgh and Adjunct Faculty at the Robotics Institute, Carnegie Mellon University: 3137 Sennott Square, 210 S. Bouquet St., PA 15260 USA", "fullName": "Jeffrey F. 
Cohn", "givenName": "Jeffrey F.", "surname": "Cohn", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-06-01T00:00:00", "pubType": "proceedings", "pages": "1-3", "year": "2009", "issn": "2160-7508", "isbn": "978-1-4244-3994-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05204259", "articleId": "12OmNwseEWG", "__typename": "AdjacentArticleType" }, "next": { "fno": "05204261", "articleId": "12OmNro0HZv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hicss/2012/4525/0/4525a818", "title": "Social Media and Warning Response Impacts in Extreme Events: Results from a Naturally Occurring Experiment", "doi": null, "abstractUrl": "/proceedings-article/hicss/2012/4525a818/12OmNBWzHOx", "parentPublication": { "id": "proceedings/hicss/2012/4525/0", "title": "2012 45th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2017/4023/0/4023a414", "title": "EGGNOG: A Continuous, Multi-modal Data Set of Naturally Occurring Gestures with Ground Truth Labels", "doi": null, "abstractUrl": "/proceedings-article/fg/2017/4023a414/12OmNBqdraC", "parentPublication": { "id": "proceedings/fg/2017/4023/0", "title": "2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204279", "title": "Automatically detecting action units from faces of pain: Comparing shape and appearance features", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204279/12OmNCgrD38", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society 
Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1993/3880/0/00341060", "title": "Shape-based tracking of naturally occurring annuli in image sequences", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1993/00341060/12OmNqEjhZB", "parentPublication": { "id": "proceedings/cvpr/1993/3880/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pci/2009/3788/0/3788a207", "title": "Avatars' Appearance and Social Behavior in Online Virtual Worlds", "doi": null, "abstractUrl": "/proceedings-article/pci/2009/3788a207/12OmNwt5sn9", "parentPublication": { "id": "proceedings/pci/2009/3788/0", "title": "2009 13th Panhellenic Conference on Informatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2009/3601/0/3601a210", "title": "Evaluating the Effect of the Number of Naturally Occurring Faults on the Estimates Produced by Capture-Recapture Models", "doi": null, "abstractUrl": "/proceedings-article/icst/2009/3601a210/12OmNwwd2T8", "parentPublication": { "id": "proceedings/icst/2009/3601/0", "title": "2009 International Conference on Software Testing Verification and Validation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2002/1695/4/169540078", "title": "Automatic Recognition of Eye Blinking in Spontaneously Occurring Behavior", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169540078/12OmNxdDFL9", "parentPublication": { "id": "proceedings/icpr/2002/1695/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2016/3674/0/5830a189", "title": "Using Petri Nets to Test 
Concurrent Behavior of Web Applications", "doi": null, "abstractUrl": "/proceedings-article/icstw/2016/5830a189/12OmNzdoN0V", "parentPublication": { "id": "proceedings/icstw/2016/3674/0", "title": "2016 IEEE Ninth International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/nt/1999/06/00811445", "title": "Packet reordering is not pathological network behavior", "doi": null, "abstractUrl": "/journal/nt/1999/06/00811445/13rRUwInvc7", "parentPublication": { "id": "trans/nt", "title": "IEEE/ACM Transactions on Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2021/3902/0/09671644", "title": "Modeling Longitudinal Behavior Dynamics Among Extremist Users in Twitter Data", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671644/1A8iYqQHj4k", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxvO07s", "title": "2008 Tenth IEEE International Symposium on Multimedia", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNBWi6Kf", "doi": "10.1109/ISM.2008.121", "title": "A Natural Facial Expression Recognition Using Differential-AAM and k-NNS", "normalizedTitle": "A Natural Facial Expression Recognition Using Differential-AAM and k-NNS", "abstract": "This paper proposes a novel natural facial expression recognition method that recognizes a sequence of dynamic facial expression images using the differential active appearance model (AAM) and k-NNS as follows. First, we use the differential-AAM features (DAFs) that are computed from the difference of the AAM parameters between an input face image and a reference face image. Second, we perform the manifold learning. Third, we recognize the facial expression of the input face image in the embedded feature space using sequence based k-NN, k-NNS. Since we use DAFs, we also propose an effective way of finding the neutral facial expression as kernel density approximation. Experimental results show that (1) the DAFs improves the facial expression recognition performance than the conventional AAM features by 20% and (2) the sequence-based k-nearest neighbors classifier provides a 95% of facial expression recognition performance on the facial expression database (FED06).", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a novel natural facial expression recognition method that recognizes a sequence of dynamic facial expression images using the differential active appearance model (AAM) and k-NNS as follows. First, we use the differential-AAM features (DAFs) that are computed from the difference of the AAM parameters between an input face image and a reference face image. Second, we perform the manifold learning. 
Third, we recognize the facial expression of the input face image in the embedded feature space using sequence based k-NN, k-NNS. Since we use DAFs, we also propose an effective way of finding the neutral facial expression as kernel density approximation. Experimental results show that (1) the DAFs improves the facial expression recognition performance than the conventional AAM features by 20% and (2) the sequence-based k-nearest neighbors classifier provides a 95% of facial expression recognition performance on the facial expression database (FED06).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a novel natural facial expression recognition method that recognizes a sequence of dynamic facial expression images using the differential active appearance model (AAM) and k-NNS as follows. First, we use the differential-AAM features (DAFs) that are computed from the difference of the AAM parameters between an input face image and a reference face image. Second, we perform the manifold learning. Third, we recognize the facial expression of the input face image in the embedded feature space using sequence based k-NN, k-NNS. Since we use DAFs, we also propose an effective way of finding the neutral facial expression as kernel density approximation. 
Experimental results show that (1) the DAFs improves the facial expression recognition performance than the conventional AAM features by 20% and (2) the sequence-based k-nearest neighbors classifier provides a 95% of facial expression recognition performance on the facial expression database (FED06).", "fno": "3454a220", "keywords": [ "Facial Expression Recognition", "Active Appearance Model", "Manifold Learning" ], "authors": [ { "affiliation": null, "fullName": "Yeongjae Cheon", "givenName": "Yeongjae", "surname": "Cheon", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Daijin Kim", "givenName": "Daijin", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "220-227", "year": "2008", "issn": null, "isbn": "978-0-7695-3454-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3454a214", "articleId": "12OmNvoFjQf", "__typename": "AdjacentArticleType" }, "next": { "fno": "3454a228", "articleId": "12OmNC4wtGk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2008/2153/0/04813308", "title": "Spontaneous facial expression classification with facial motion vectors", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813308/12OmNA0MZ3d", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109d776", "title": "Facial Expression Mimicking System", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d776/12OmNrJiCY2", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", 
"title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813336", "title": "Recognizing partial facial action units based on 3D dynamic range data for facial expression recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813336/12OmNro0Iaa", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349489", "title": "Evaluating AAM fitting methods for facial expression recognition", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349489/12OmNroij66", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2010/4215/0/4215a087", "title": "Development of a Facial Emotion Recognition Method Based on Combining AAM with DBN", "doi": null, "abstractUrl": "/proceedings-article/cw/2010/4215a087/12OmNwwd2In", "parentPublication": { "id": "proceedings/cw/2010/4215/0", "title": "2010 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/2/3119b680", "title": "An Expression Space Model for Facial Expression Analysis", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119b680/12OmNxR5UTS", "parentPublication": { "id": "proceedings/cisp/2008/3119/3", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/324P3A13", "title": "Learning active facial patches for 
expression analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/324P3A13/12OmNxWLTjc", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2009/3762/0/3762a885", "title": "Age Estimation Using AAM and Local Facial Features", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2009/3762a885/12OmNyqiaWg", "parentPublication": { "id": "proceedings/iih-msp/2009/3762/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460927", "title": "Multi-view facial expression recognition using local appearance features", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460927/12OmNyrqzDG", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imvip/2008/3332/0/3332a063", "title": "Modelling the Manifold of Facial Expression Using Texture", "doi": null, "abstractUrl": "/proceedings-article/imvip/2008/3332a063/12OmNzkMlNk", "parentPublication": { "id": "proceedings/imvip/2008/3332/0", "title": "International Machine Vision and Image Processing Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNClQ0o4", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNqAU6G5", "doi": "10.1109/CVPRW.2010.5543611", "title": "A hierarchical approach to facial aging", "normalizedTitle": "A hierarchical approach to facial aging", "abstract": "Active Appearance Models (AAMs) have been used as a promising tool in the field of synthetic age progression. However, they are yet to be demonstrated on a large human population with wide variation. This paper presents a novel AAM-based hierarchical approach to facial aging. This work is motivated from studies in medical and anthropological literature on classification of human faces based on gender, ethnic and age groups. The proposed hierarchical model approach is a ethnicity and gender specific aging paradigm. Specifically, the Caucasian (European descent) and African American ethnic groups are considered. This work will further show that using individual hierarchical models generate better age-progressed synthetic images when compared to a general model approach. The results are evaluated by visual perception of the intended age group and preservation of identity. Also, a quantitative evaluation was performed using FaceVACS, a commercial face recognition system, as a surrogate measure. Higher match scores for synthetic images generated by hierarchical models when compared to those generated by a general model suggests the efficiency of the proposed hierarchical model approach.", "abstracts": [ { "abstractType": "Regular", "content": "Active Appearance Models (AAMs) have been used as a promising tool in the field of synthetic age progression. However, they are yet to be demonstrated on a large human population with wide variation. 
This paper presents a novel AAM-based hierarchical approach to facial aging. This work is motivated from studies in medical and anthropological literature on classification of human faces based on gender, ethnic and age groups. The proposed hierarchical model approach is a ethnicity and gender specific aging paradigm. Specifically, the Caucasian (European descent) and African American ethnic groups are considered. This work will further show that using individual hierarchical models generate better age-progressed synthetic images when compared to a general model approach. The results are evaluated by visual perception of the intended age group and preservation of identity. Also, a quantitative evaluation was performed using FaceVACS, a commercial face recognition system, as a surrogate measure. Higher match scores for synthetic images generated by hierarchical models when compared to those generated by a general model suggests the efficiency of the proposed hierarchical model approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Active Appearance Models (AAMs) have been used as a promising tool in the field of synthetic age progression. However, they are yet to be demonstrated on a large human population with wide variation. This paper presents a novel AAM-based hierarchical approach to facial aging. This work is motivated from studies in medical and anthropological literature on classification of human faces based on gender, ethnic and age groups. The proposed hierarchical model approach is a ethnicity and gender specific aging paradigm. Specifically, the Caucasian (European descent) and African American ethnic groups are considered. This work will further show that using individual hierarchical models generate better age-progressed synthetic images when compared to a general model approach. The results are evaluated by visual perception of the intended age group and preservation of identity. 
Also, a quantitative evaluation was performed using FaceVACS, a commercial face recognition system, as a surrogate measure. Higher match scores for synthetic images generated by hierarchical models when compared to those generated by a general model suggests the efficiency of the proposed hierarchical model approach.", "fno": "05543611", "keywords": [ "Face Recognition", "Visual Perception", "Facial Aging", "Active Appearance Models", "Synthetic Age Progression", "AAM Based Hierarchical Approach", "Ethnicity", "Gender Specific Aging Paradigm", "Caucasian", "African American Ethnic Groups", "Visual Perception", "Face VACS", "Face Recognition System", "Aging", "Skin", "Active Appearance Model", "Humans", "Biomedical Imaging", "Face Recognition", "Shape", "Computer Science", "Visual Perception", "Performance Evaluation" ], "authors": [ { "affiliation": "Face Aging Group, Computer Science Department, UNCW, USA", "fullName": "Amrutha Sethuram", "givenName": "Amrutha", "surname": "Sethuram", "__typename": "ArticleAuthorType" }, { "affiliation": "Face Aging Group, Computer Science Department, UNCW, USA", "fullName": "Karl Ricanek", "givenName": "Karl", "surname": "Ricanek", "__typename": "ArticleAuthorType" }, { "affiliation": "Face Aging Group, Computer Science Department, UNCW, USA", "fullName": "Eric Patterson", "givenName": "Eric", "surname": "Patterson", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-06-01T00:00:00", "pubType": "proceedings", "pages": "100-107", "year": "2010", "issn": "2160-7508", "isbn": "978-1-4244-7029-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05543820", "articleId": "12OmNBaT5Z7", "__typename": "AdjacentArticleType" }, "next": { "fno": "05543825", "articleId": "12OmNwcCIIG", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2008/2153/0/04813349", "title": "Evaluating the performance of face-aging algorithms", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813349/12OmNwqfsWu", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813314", "title": "Design sparse features for age estimation using hierarchical face model", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813314/12OmNxG1yWj", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459181", "title": "Learning long term face aging patterns from partially dense aging databases", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459181/12OmNxzMnRp", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a593", "title": "Discriminant Feature Manifold for Facial Aging Estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a593/12OmNy4r3Os", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771398", "title": "Facial feature fusion and model selection for age estimation", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771398/12OmNzBOhwu", "parentPublication": { "id": 
"proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1999/0167/0/01670210", "title": "Skin Aging Estimation by Facial Simulation", "doi": null, "abstractUrl": "/proceedings-article/ca/1999/01670210/12OmNzRZpUr", "parentPublication": { "id": "proceedings/ca/1999/0167/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771334", "title": "Kernel spectral regression of perceived age from hybrid facial features", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771334/12OmNzlUKLB", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/12/i2234", "title": "Automatic Age Estimation Based on Facial Aging Patterns", "doi": null, "abstractUrl": "/journal/tp/2007/12/i2234/13rRUwd9CH4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/11/ttp2012112083", "title": "A Concatenational Graph Evolution Aging Model", "doi": null, "abstractUrl": "/journal/tp/2012/11/ttp2012112083/13rRUxOveaZ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/03/08283774", "title": "Recurrent Face Aging with Hierarchical AutoRegressive Memory", "doi": null, "abstractUrl": "/journal/tp/2019/03/08283774/17D45Vw15vg", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "12OmNzVGcJn", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "acronym": "fg", "groupId": "1000065", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNro0Iaa", "doi": "10.1109/AFGR.2008.4813336", "title": "Recognizing partial facial action units based on 3D dynamic range data for facial expression recognition", "normalizedTitle": "Recognizing partial facial action units based on 3D dynamic range data for facial expression recognition", "abstract": "Research on automatic facial expression recognition has benefited from work in psychology, specifically the Facial Action Coding System (FACS). To date, most existing approaches are primarily based on 2D images or videos. With the emergence of real-time 3D dynamic imaging technologies, however, 3D dynamic facial data is now available, thus opening up an alternative to detect facial action units in dynamic 3D space. In this paper, we investigate how to use this new modality to improve action unit (AU) detection. We select a subset of AUs from both the upper and lower parts of a facial area, apply the active appearance model (AAM) method and take the correspondence between textures and range models to track the pre-defined facial features across the 3D model sequences. A Hidden Markov Model (HMM) based classifier is employed to recognize the partial AUs. The experiments show that our 3D dynamic tracking based approach outperforms the compared 2D feature tracking based approach. The results are also comparable with the manually-picked 3D facial features based method. Finally, we extend our approach to validate the experiment for recognizing six prototypic facial expressions.", "abstracts": [ { "abstractType": "Regular", "content": "Research on automatic facial expression recognition has benefited from work in psychology, specifically the Facial Action Coding System (FACS). 
To date, most existing approaches are primarily based on 2D images or videos. With the emergence of real-time 3D dynamic imaging technologies, however, 3D dynamic facial data is now available, thus opening up an alternative to detect facial action units in dynamic 3D space. In this paper, we investigate how to use this new modality to improve action unit (AU) detection. We select a subset of AUs from both the upper and lower parts of a facial area, apply the active appearance model (AAM) method and take the correspondence between textures and range models to track the pre-defined facial features across the 3D model sequences. A Hidden Markov Model (HMM) based classifier is employed to recognize the partial AUs. The experiments show that our 3D dynamic tracking based approach outperforms the compared 2D feature tracking based approach. The results are also comparable with the manually-picked 3D facial features based method. Finally, we extend our approach to validate the experiment for recognizing six prototypic facial expressions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Research on automatic facial expression recognition has benefited from work in psychology, specifically the Facial Action Coding System (FACS). To date, most existing approaches are primarily based on 2D images or videos. With the emergence of real-time 3D dynamic imaging technologies, however, 3D dynamic facial data is now available, thus opening up an alternative to detect facial action units in dynamic 3D space. In this paper, we investigate how to use this new modality to improve action unit (AU) detection. We select a subset of AUs from both the upper and lower parts of a facial area, apply the active appearance model (AAM) method and take the correspondence between textures and range models to track the pre-defined facial features across the 3D model sequences. A Hidden Markov Model (HMM) based classifier is employed to recognize the partial AUs. 
The experiments show that our 3D dynamic tracking based approach outperforms the compared 2D feature tracking based approach. The results are also comparable with the manually-picked 3D facial features based method. Finally, we extend our approach to validate the experiment for recognizing six prototypic facial expressions.", "fno": "04813336", "keywords": [ "Face Recognition", "Feature Extraction", "Hidden Markov Models", "Image Classification", "Image Texture", "Partial Facial Action Units", "3 D Dynamic Range Data", "Automatic Facial Expression Recognition", "Psychology", "Facial Action Coding System", "Real Time 3 D Dynamic Imaging Technology", "3 D Dynamic Facial Data", "Action Unit Detection", "Active Appearance Model Method", "Image Textures", "Facial Features", "3 D Model Sequences", "Hidden Markov Model Based Classifier", "Dynamic Tracking", "Feature Tracking", "Facial Expressions", "Face Recognition", "Dynamic Range", "Hidden Markov Models", "Space Technology", "Facial Features", "Psychology", "Videos", "Face Detection", "Gold", "Active Appearance Model" ], "authors": [ { "affiliation": "Department of Computer Science, State University of New York at Binghamton, 13902 USA", "fullName": "Yi Sun", "givenName": "Yi", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science, State University of New York at Binghamton, 13902 USA", "fullName": "Michael Reale", "givenName": "Michael", "surname": "Reale", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science, State University of New York at Binghamton, 13902 USA", "fullName": "Lijun Yin", "givenName": "Lijun", "surname": "Yin", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-09-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": null, "isbn": "978-1-4244-2153-4", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04813335", "articleId": "12OmNwG90jp", "__typename": "AdjacentArticleType" }, "next": { "fno": "04813337", "articleId": "12OmNscOUcI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2009/4800/0/05349579", "title": "Pleasure-arousal-dominance driven facial expression simulation", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349579/12OmNBLdKEh", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543263", "title": "Facial action unit detection: 3D versus 2D modality", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543263/12OmNBOCWsc", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771330", "title": "A hierarchical framework for simultaneous facial activity tracking", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771330/12OmNwudQQS", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209e648", "title": "Temporal Facial Expression Modeling for Automated Action Unit Intensity Measurement", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209e648/12OmNxWcHit", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2000/0580/0/05800484", "title": "Recognizing Lower Face Action Units for Facial Expression Analysis", "doi": null, "abstractUrl": "/proceedings-article/fg/2000/05800484/12OmNyvY9zE", "parentPublication": { "id": "proceedings/fg/2000/0580/0", "title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2013/5545/0/06553757", "title": "A unified probabilistic framework for measuring the intensity of spontaneous facial action units", "doi": null, "abstractUrl": "/proceedings-article/fg/2013/06553757/12OmNzAohYC", "parentPublication": { "id": "proceedings/fg/2013/5545/0", "title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273630", "title": "Facial action units detection under pose variations using deep regions learning", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273630/12OmNzyp5Vw", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2001/02/i0097", "title": "Recognizing Action Units for Facial Expression Analysis", "doi": null, "abstractUrl": "/journal/tp/2001/02/i0097/13rRUyYjKbm", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000b199", "title": "Unsupervised Features for Facial Expression Intensity Estimation 
Over Time", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000b199/17D45Wuc37p", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756610", "title": "Facial Action Unit Analysis through 3D Point Cloud Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756610/1bzYpAhHf5m", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx7ouU1", "title": "2010 International Conference on Cyberworlds", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNwwd2In", "doi": "10.1109/CW.2010.65", "title": "Development of a Facial Emotion Recognition Method Based on Combining AAM with DBN", "normalizedTitle": "Development of a Facial Emotion Recognition Method Based on Combining AAM with DBN", "abstract": "In this paper, novel methods for facial emotion recognition in facial image sequences are presented. Our facial emotional feature detection and extracting based on Active Appearance Models (AAM) with Ekman’s Facial Action Coding System (FACS). Our approach to facial emotion recognition lies in the dynamic and probabilistic framework based on Dynamic Bayesian Network (DBN) with Kalman Filter for modeling and understanding the temporal phases of facial expressions in image sequences. By combining AAM and DBN, the proposed method can achieve a higher recognition performance level compare with other facial expression recognition methods. The result on the BioID dataset show a recognition accuracy of more than 90% for facial emotion reasoning using the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, novel methods for facial emotion recognition in facial image sequences are presented. Our facial emotional feature detection and extracting based on Active Appearance Models (AAM) with Ekman’s Facial Action Coding System (FACS). Our approach to facial emotion recognition lies in the dynamic and probabilistic framework based on Dynamic Bayesian Network (DBN) with Kalman Filter for modeling and understanding the temporal phases of facial expressions in image sequences. By combining AAM and DBN, the proposed method can achieve a higher recognition performance level compare with other facial expression recognition methods. 
The result on the BioID dataset show a recognition accuracy of more than 90% for facial emotion reasoning using the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, novel methods for facial emotion recognition in facial image sequences are presented. Our facial emotional feature detection and extracting based on Active Appearance Models (AAM) with Ekman’s Facial Action Coding System (FACS). Our approach to facial emotion recognition lies in the dynamic and probabilistic framework based on Dynamic Bayesian Network (DBN) with Kalman Filter for modeling and understanding the temporal phases of facial expressions in image sequences. By combining AAM and DBN, the proposed method can achieve a higher recognition performance level compare with other facial expression recognition methods. The result on the BioID dataset show a recognition accuracy of more than 90% for facial emotion reasoning using the proposed method.", "fno": "4215a087", "keywords": [ "Facial Emotion Recognition Facial Feature Extraction", "Active Appearance Model", "Facial Action Coding System", "Dynamic Bayesian Network" ], "authors": [ { "affiliation": null, "fullName": "Kwang-Eun Ko", "givenName": "Kwang-Eun", "surname": "Ko", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kwee-Bo Sim", "givenName": "Kwee-Bo", "surname": "Sim", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-10-01T00:00:00", "pubType": "proceedings", "pages": "87-91", "year": "2010", "issn": null, "isbn": "978-0-7695-4215-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4215a082", "articleId": "12OmNwDAC8Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "4215a092", "articleId": "12OmNAlNiOQ", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2008/3454/0/3454a220", "title": "A Natural Facial Expression Recognition Using Differential-AAM and k-NNS", "doi": null, "abstractUrl": "/proceedings-article/ism/2008/3454a220/12OmNBWi6Kf", "parentPublication": { "id": "proceedings/ism/2008/3454/0", "title": "2008 Tenth IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imis/2014/4331/0/4331a196", "title": "Facial Feature Extraction Using an Active Appearance Model on the iPhone", "doi": null, "abstractUrl": "/proceedings-article/imis/2014/4331a196/12OmNCbU2X0", "parentPublication": { "id": "proceedings/imis/2014/4331/0", "title": "2014 Eighth International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ems/2011/4619/0/4619a196", "title": "Human-Computer Interaction Using Emotion Recognition from Facial Expression", "doi": null, "abstractUrl": "/proceedings-article/ems/2011/4619a196/12OmNrJ11CY", "parentPublication": { "id": "proceedings/ems/2011/4619/0", "title": "Computer Modeling and Simulation, UKSIM European Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349489", "title": "Evaluating AAM fitting methods for facial expression recognition", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349489/12OmNroij66", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477679", "title": "Multimodal emotion recognition using deep learning architectures", "doi": null, 
"abstractUrl": "/proceedings-article/wacv/2016/07477679/12OmNviHKjy", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813335", "title": "Automatic bi-modal emotion recognition system based on fusion of facial expressions and emotion extraction from speech", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813335/12OmNwG90jp", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2005/9331/0/01521570", "title": "An intelligent system for facial emotion recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2005/01521570/12OmNy5R3GE", "parentPublication": { "id": "proceedings/icme/2005/9331/0", "title": "2005 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2009/3762/0/3762a885", "title": "Age Estimation Using AAM and Local Facial Features", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2009/3762a885/12OmNyqiaWg", "parentPublication": { "id": "proceedings/iih-msp/2009/3762/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/02/ttp2010020258", "title": "A Unified Probabilistic Framework for Spontaneous Facial Action Modeling and Understanding", "doi": null, "abstractUrl": "/journal/tp/2010/02/ttp2010020258/13rRUwInvC2", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/10/i1683", "title": "Facial Action Unit Recognition by Exploiting Their Dynamic and Semantic Relationships", "doi": null, "abstractUrl": "/journal/tp/2007/10/i1683/13rRUwfZBWj", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx8wTfL", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNxRnvPb", "doi": "10.1109/ICPR.2008.4761398", "title": "Facial expression analysis with facial expression deformation", "normalizedTitle": "Facial expression analysis with facial expression deformation", "abstract": "In this paper, we proposes an effective and novel approach to recognize subtle facial expression method which is facial expression deformation. The proposed method deforms subtle facial expressions into corresponding extreme facial expressions. Facial expression deformation processes by extracting subtle motion vector of the predefined feature points and amplifying them. By adding amplified motion vector to Active Appearance Models (AAMs) fitted feature points, the extreme facial expression images is recovered (obtained) by the piece-wise affine warping. After facial expression deformation, we extract the shape and appearance features by projecting deformed facial expression image to the AAM shape and appearance model. We use the multi-class Support Vector Machines (SVMs) to classify the shape and appearance features. The facial expression recognition performance shows promising results of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we proposes an effective and novel approach to recognize subtle facial expression method which is facial expression deformation. The proposed method deforms subtle facial expressions into corresponding extreme facial expressions. Facial expression deformation processes by extracting subtle motion vector of the predefined feature points and amplifying them. 
By adding amplified motion vector to Active Appearance Models (AAMs) fitted feature points, the extreme facial expression images is recovered (obtained) by the piece-wise affine warping. After facial expression deformation, we extract the shape and appearance features by projecting deformed facial expression image to the AAM shape and appearance model. We use the multi-class Support Vector Machines (SVMs) to classify the shape and appearance features. The facial expression recognition performance shows promising results of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we proposes an effective and novel approach to recognize subtle facial expression method which is facial expression deformation. The proposed method deforms subtle facial expressions into corresponding extreme facial expressions. Facial expression deformation processes by extracting subtle motion vector of the predefined feature points and amplifying them. By adding amplified motion vector to Active Appearance Models (AAMs) fitted feature points, the extreme facial expression images is recovered (obtained) by the piece-wise affine warping. After facial expression deformation, we extract the shape and appearance features by projecting deformed facial expression image to the AAM shape and appearance model. We use the multi-class Support Vector Machines (SVMs) to classify the shape and appearance features. 
The facial expression recognition performance shows promising results of the proposed method.", "fno": "04761398", "keywords": [ "Face Recognition", "Feature Extraction", "Support Vector Machines", "Facial Expression Analysis", "Active Appearance Models", "Support Vector Machines", "Face Recognition", "Active Appearance Model", "Eigenvalues And Eigenfunctions", "Computer Science", "Deformable Models", "Support Vector Machines", "Support Vector Machine Classification", "Parametric Statistics", "Active Shape Model", "Human Robot Interaction" ], "authors": [ { "affiliation": "Dept. of Computer Science & Engineering, POSTECH, San 31 Hyoja-dong, Pohang, Republic of Korea", "fullName": "Sungsoo Park", "givenName": "Sungsoo", "surname": "Park", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Computer Science & Engineering, POSTECH, San 31 Hyoja-dong, Pohang, Republic of Korea", "fullName": "Jongju Shin", "givenName": "Jongju", "surname": "Shin", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. 
of Computer Science & Engineering, POSTECH, San 31 Hyoja-dong, Pohang, Republic of Korea", "fullName": "Daijin Kim", "givenName": "Daijin", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "1051-4651", "isbn": "978-1-4244-2174-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04761397", "articleId": "12OmNCeaPXX", "__typename": "AdjacentArticleType" }, "next": { "fno": "04761399", "articleId": "12OmNz5s0LD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2008/2153/0/04813308", "title": "Spontaneous facial expression classification with facial motion vectors", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813308/12OmNA0MZ3d", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2008/3454/0/3454a220", "title": "A Natural Facial Expression Recognition Using Differential-AAM and k-NNS", "doi": null, "abstractUrl": "/proceedings-article/ism/2008/3454a220/12OmNBWi6Kf", "parentPublication": { "id": "proceedings/ism/2008/3454/0", "title": "2008 Tenth IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2009/3866/0/3866a264", "title": "A Quadratic Deformation Model for Facial Expression Recognition", "doi": null, "abstractUrl": "/proceedings-article/dicta/2009/3866a264/12OmNC4wtDl", "parentPublication": { "id": "proceedings/dicta/2009/3866/0", "title": "2009 Digital Image Computing: Techniques and 
Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349489", "title": "Evaluating AAM fitting methods for facial expression recognition", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349489/12OmNroij66", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeae/2015/8328/0/07386186", "title": "A Block-Wise Deformation-Based Approach for Facial Expression Recognition", "doi": null, "abstractUrl": "/proceedings-article/icmeae/2015/07386186/12OmNweTvPR", "parentPublication": { "id": "proceedings/icmeae/2015/8328/0", "title": "2015 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2009/3789/0/3789a044", "title": "Facial Expression Representation Using a Quadratic Deformation Model", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a044/12OmNxEBz8F", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2007/1834/0/04458169", "title": "Automatic Synthesis of Realistic Facial Expressions", "doi": null, "abstractUrl": "/proceedings-article/isspit/2007/04458169/12OmNxvO08Q", "parentPublication": { "id": "proceedings/isspit/2007/1834/0", "title": "2007 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813412", "title": "A real-time facial 
expression recognition system based on Active Appearance Models using gray images and edge images", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813412/12OmNy87Qxh", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460927", "title": "Multi-view facial expression recognition using local appearance features", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460927/12OmNyrqzDG", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ems/2011/4619/0/4619a168", "title": "Facial Expression Recognition in Image Sequences Using Active Shape Model and SVM", "doi": null, "abstractUrl": "/proceedings-article/ems/2011/4619a168/12OmNyrqzwK", "parentPublication": { "id": "proceedings/ems/2011/4619/0", "title": "Computer Modeling and Simulation, UKSIM European Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx0A7K1", "title": "Face and Gesture 2011", "acronym": "fg", "groupId": "1000065", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNy4IF8Y", "doi": "10.1109/FG.2011.5771354", "title": "A SSIM-based approach for finding similar facial expressions", "normalizedTitle": "A SSIM-based approach for finding similar facial expressions", "abstract": "There are various scenarios where finding the most similar expression is the requirement rather than classifying one into discrete, pre-defined classes, for example, for facial expression transfer and facial expression based automatic album generation. This paper proposes a novel method for finding the most similar facial expression. Instead of the regular L2 norm distance, we investigate the use of the Structural SIMilarity (SSIM) metric for similarity comparison as a distance metric in a nearest neighbour unsupervised algorithm. The feature vectors are generated using Active Appearance Models (AAM). We also demonstrate how this technique can be extended and used for finding corresponding facial expression images across two or more subjects, which is useful in applications such as facial animation and automatic expression transfer. Person-independent facial expression performance results are shown on the Multi-PIE, FEEDTUM and AVOZES databases. We also compare the performance of the SSIM metric versus other distance metrics in a nearest neighbour search for finding the most similar facial expression to a given image.", "abstracts": [ { "abstractType": "Regular", "content": "There are various scenarios where finding the most similar expression is the requirement rather than classifying one into discrete, pre-defined classes, for example, for facial expression transfer and facial expression based automatic album generation. This paper proposes a novel method for finding the most similar facial expression. 
Instead of the regular L2 norm distance, we investigate the use of the Structural SIMilarity (SSIM) metric for similarity comparison as a distance metric in a nearest neighbour unsupervised algorithm. The feature vectors are generated using Active Appearance Models (AAM). We also demonstrate how this technique can be extended and used for finding corresponding facial expression images across two or more subjects, which is useful in applications such as facial animation and automatic expression transfer. Person-independent facial expression performance results are shown on the Multi-PIE, FEEDTUM and AVOZES databases. We also compare the performance of the SSIM metric versus other distance metrics in a nearest neighbour search for finding the most similar facial expression to a given image.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "There are various scenarios where finding the most similar expression is the requirement rather than classifying one into discrete, pre-defined classes, for example, for facial expression transfer and facial expression based automatic album generation. This paper proposes a novel method for finding the most similar facial expression. Instead of the regular L2 norm distance, we investigate the use of the Structural SIMilarity (SSIM) metric for similarity comparison as a distance metric in a nearest neighbour unsupervised algorithm. The feature vectors are generated using Active Appearance Models (AAM). We also demonstrate how this technique can be extended and used for finding corresponding facial expression images across two or more subjects, which is useful in applications such as facial animation and automatic expression transfer. Person-independent facial expression performance results are shown on the Multi-PIE, FEEDTUM and AVOZES databases. 
We also compare the performance of the SSIM metric versus other distance metrics in a nearest neighbour search for finding the most similar facial expression to a given image.", "fno": "05771354", "keywords": [ "Computer Animation", "Emotion Recognition", "Face Recognition", "SSIM Based Approach", "Structural Similarity Metric", "Similar Facial Expressions", "Automatic Album Generation", "Regular L 2 Norm Distance", "Similarity Comparison", "Nearest Neighbour Unsupervised Algorithm", "Feature Vectors", "Active Appearance Models", "Facial Animation", "Automatic Expression Transfer", "Multi PIE Database", "FEEDTUM Database", "AVOZES Database", "Face", "Feature Extraction", "Measurement", "Active Appearance Model", "Shape", "Training", "Mouth" ], "authors": [ { "affiliation": "School of Computer Science, Australian National University, Australia", "fullName": "Abhinav Dhall", "givenName": "Abhinav", "surname": "Dhall", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Engineering, CECS, Australian National University, Australia", "fullName": "Akshay Asthana", "givenName": "Akshay", "surname": "Asthana", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science, Australian National University, Australia", "fullName": "Roland Goecke", "givenName": "Roland", "surname": "Goecke", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "815-820", "year": "2011", "issn": null, "isbn": "978-1-4244-9140-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05771353", "articleId": "12OmNAqU4VX", "__typename": "AdjacentArticleType" }, "next": { "fno": "05771355", "articleId": "12OmNy4IEVD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/fg/2008/2153/0/04813308", "title": "Spontaneous facial expression classification with facial motion vectors", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813308/12OmNA0MZ3d", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543269", "title": "Facial expression invariant head pose normalization using Gaussian Process Regression", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543269/12OmNAjO6Ep", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2008/3454/0/3454a220", "title": "A Natural Facial Expression Recognition Using Differential-AAM and k-NNS", "doi": null, "abstractUrl": "/proceedings-article/ism/2008/3454a220/12OmNBWi6Kf", "parentPublication": { "id": "proceedings/ism/2008/3454/0", "title": "2008 Tenth IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imis/2014/4331/0/4331a196", "title": "Facial Feature Extraction Using an Active Appearance Model on the iPhone", "doi": null, "abstractUrl": "/proceedings-article/imis/2014/4331a196/12OmNCbU2X0", "parentPublication": { "id": "proceedings/imis/2014/4331/0", "title": "2014 Eighth International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgciot/2015/7910/0/07380607", "title": "Facial expression recognition using VFC and snakes", "doi": null, "abstractUrl": 
"/proceedings-article/icgciot/2015/07380607/12OmNCdBDYo", "parentPublication": { "id": "proceedings/icgciot/2015/7910/0", "title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273635", "title": "Refactoring facial expressions: An automatic analysis of natural occurring facial expressions in iterative social dilemma", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273635/12OmNCeK2cX", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771351", "title": "Individual identification based on facial dynamics during expressions using active-appearance-based Hidden Markov Models", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771351/12OmNroij0C", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761398", "title": "Facial expression analysis with facial expression deformation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761398/12OmNxRnvPb", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2007/1834/0/04458169", "title": "Automatic Synthesis of Realistic Facial Expressions", "doi": null, "abstractUrl": "/proceedings-article/isspit/2007/04458169/12OmNxvO08Q", "parentPublication": { "id": "proceedings/isspit/2007/1834/0", "title": "2007 IEEE International Symposium on Signal Processing and 
Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771465", "title": "Recognizing facial expressions from 3D video: Current results and future prospects", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771465/12OmNzkMlGa", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx0A7K1", "title": "Face and Gesture 2011", "acronym": "fg", "groupId": "1000065", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNzBOhwu", "doi": "10.1109/FG.2011.5771398", "title": "Facial feature fusion and model selection for age estimation", "normalizedTitle": "Facial feature fusion and model selection for age estimation", "abstract": "Automatic face age estimation is challenging due to its complexity owing to genetic difference, behavior and environmental factors, the dynamics of facial aging between different individuals, etc. In this work we propose to fuse the global facial feature extracted from Active Appearance Model (AAM) and the local facial features extracted from Local Binary Pattern (LBP), as the representation of faces. Furthermore, we introduce an advanced age estimation system combining feature fusion and model selection schemes such as Least Angle Regression (LAR) and sequential approaches. Due to the fact that different facial feature representations may come with various types of measurement scales, we compare multiple normalization schemes for both facial features. We demonstrate that the feature fusion with model selection can achieve significant improvement in age estimation over single feature representation alone. Our experiment on multi-ethnicity UIUC-PAL database suggests that age estimation with feature fusion and model selection outperforms the single feature, or the full feature model.", "abstracts": [ { "abstractType": "Regular", "content": "Automatic face age estimation is challenging due to its complexity owing to genetic difference, behavior and environmental factors, the dynamics of facial aging between different individuals, etc. In this work we propose to fuse the global facial feature extracted from Active Appearance Model (AAM) and the local facial features extracted from Local Binary Pattern (LBP), as the representation of faces. 
Furthermore, we introduce an advanced age estimation system combining feature fusion and model selection schemes such as Least Angle Regression (LAR) and sequential approaches. Due to the fact that different facial feature representations may come with various types of measurement scales, we compare multiple normalization schemes for both facial features. We demonstrate that the feature fusion with model selection can achieve significant improvement in age estimation over single feature representation alone. Our experiment on multi-ethnicity UIUC-PAL database suggests that age estimation with feature fusion and model selection outperforms the single feature, or the full feature model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Automatic face age estimation is challenging due to its complexity owing to genetic difference, behavior and environmental factors, the dynamics of facial aging between different individuals, etc. In this work we propose to fuse the global facial feature extracted from Active Appearance Model (AAM) and the local facial features extracted from Local Binary Pattern (LBP), as the representation of faces. Furthermore, we introduce an advanced age estimation system combining feature fusion and model selection schemes such as Least Angle Regression (LAR) and sequential approaches. Due to the fact that different facial feature representations may come with various types of measurement scales, we compare multiple normalization schemes for both facial features. We demonstrate that the feature fusion with model selection can achieve significant improvement in age estimation over single feature representation alone. 
Our experiment on multi-ethnicity UIUC-PAL database suggests that age estimation with feature fusion and model selection outperforms the single feature, or the full feature model.", "fno": "05771398", "keywords": [ "Feature Extraction", "Regression Analysis", "Visual Databases", "Facial Feature Fusion", "Model Selection Scheme", "Automatic Face Age Estimation", "Global Facial Feature Extraction", "Active Appearance Model", "Local Facial Features Extraction", "Local Binary Pattern", "Least Angle Regression", "Multiethnicity UIUC PAL Database", "Active Appearance Model", "Databases", "Aging" ], "authors": [ { "affiliation": "University of North Carolina Wilmington", "fullName": "Cuixian Chen", "givenName": "Cuixian", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina Wilmington", "fullName": "Wankou Yang", "givenName": "Wankou", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina Wilmington", "fullName": "Yishi Wang", "givenName": "Yishi", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina Wilmington", "fullName": "Karl Ricanek", "givenName": "Karl", "surname": "Ricanek", "__typename": "ArticleAuthorType" }, { "affiliation": "Concordia University", "fullName": "Khoa Luu", "givenName": "Khoa", "surname": "Luu", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "200-205", "year": "2011", "issn": null, "isbn": "978-1-4244-9140-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05771397", "articleId": "12OmNvlPkAd", "__typename": "AdjacentArticleType" }, "next": { "fno": "05771399", "articleId": "12OmNx8OuqM", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2010/7029/0/05543820", "title": "Face age estimation using model selection", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543820/12OmNBaT5Z7", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460631", "title": "Learning distance metric regression for facial age estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460631/12OmNs59JRL", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813314", "title": "Design sparse features for age estimation using hierarchical face model", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813314/12OmNxG1yWj", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2009/3762/0/3762a885", "title": "Age Estimation Using AAM and Local Facial Features", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2009/3762a885/12OmNyqiaWg", "parentPublication": { "id": "proceedings/iih-msp/2009/3762/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2016/4320/0/07945649", "title": "Age estimation using local matched filter binary pattern", "doi": null, "abstractUrl": 
"/proceedings-article/aiccsa/2016/07945649/12OmNz2kqlz", "parentPublication": { "id": "proceedings/aiccsa/2016/4320/0", "title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163157", "title": "FG2015 age progression evaluation", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163157/12OmNzICESE", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visapp/2014/8133/1/07294873", "title": "An investigation on local wrinkle-based extractor of age estimation", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07294873/12OmNzd7bAI", "parentPublication": { "id": "proceedings/visapp/2014/8133/1", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2011/9140/0/05771334", "title": "Kernel spectral regression of perceived age from hybrid facial features", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771334/12OmNzlUKLB", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2013/5545/0/06553772", "title": "Multi-feature ordinal ranking for facial age estimation", "doi": null, "abstractUrl": "/proceedings-article/fg/2013/06553772/12OmNzxgHzI", "parentPublication": { "id": "proceedings/fg/2013/5545/0", "title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cse-euc/2017/3220/1/08005833", "title": "Partial Least Squares Regression Based Facial Age Estimation", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2017/08005833/17D45XERmmq", "parentPublication": { "id": "proceedings/cse-euc/2017/3220/1", "title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0lf", "title": "HPCMP Users Group Conference", "acronym": "hpcmp-ugc", "groupId": "1002962", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNwvVrCm", "doi": "10.1109/HPCMP-UGC.2009.16", "title": "Implicit LES Computations with Applications to Micro Air Vehicles", "normalizedTitle": "Implicit LES Computations with Applications to Micro Air Vehicles", "abstract": "Implicit large eddy simulation (ILES) computations have been performed for canonical model problems associated with flexible, flapping-wing micro air vehicles. This computationally intensive approach, which is able to directly model laminar/transitional/turbulent flowfields, requires the use of the best high performance computational platforms available. Computations are first performed for an SD7003 airfoil section at αo=4° plunging with reduced frequency k=3.93 and amplitude ho=0.05. For Rec=4×104, the dynamic-stall vortex system is laminar at inception, but experiences an abrupt breakdown associated with the onset of spanwise instability effects. The aerodynamics solver is then coupled with a nonlinear finite element solver to compute the flow over a flexible membrane wing. A description of the unsteady fluid/structure interaction for α=14° is presented indicating a close coupling between the unsteady flow behavior and the structural response. Good agreement of the computed results with available experimental measurements is shown for both problems considered.", "abstracts": [ { "abstractType": "Regular", "content": "Implicit large eddy simulation (ILES) computations have been performed for canonical model problems associated with flexible, flapping-wing micro air vehicles. This computationally intensive approach, which is able to directly model laminar/transitional/turbulent flowfields, requires the use of the best high performance computational platforms available. 
Computations are first performed for an SD7003 airfoil section at αo=4° plunging with reduced frequency k=3.93 and amplitude ho=0.05. For Rec=4×104, the dynamic-stall vortex system is laminar at inception, but experiences an abrupt breakdown associated with the onset of spanwise instability effects. The aerodynamics solver is then coupled with a nonlinear finite element solver to compute the flow over a flexible membrane wing. A description of the unsteady fluid/structure interaction for α=14° is presented indicating a close coupling between the unsteady flow behavior and the structural response. Good agreement of the computed results with available experimental measurements is shown for both problems considered.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Implicit large eddy simulation (ILES) computations have been performed for canonical model problems associated with flexible, flapping-wing micro air vehicles. This computationally intensive approach, which is able to directly model laminar/transitional/turbulent flowfields, requires the use of the best high performance computational platforms available. Computations are first performed for an SD7003 airfoil section at αo=4° plunging with reduced frequency k=3.93 and amplitude ho=0.05. For Rec=4×104, the dynamic-stall vortex system is laminar at inception, but experiences an abrupt breakdown associated with the onset of spanwise instability effects. The aerodynamics solver is then coupled with a nonlinear finite element solver to compute the flow over a flexible membrane wing. A description of the unsteady fluid/structure interaction for α=14° is presented indicating a close coupling between the unsteady flow behavior and the structural response. 
Good agreement of the computed results with available experimental measurements is shown for both problems considered.", "fno": "3946a073", "keywords": [ "Vortices", "Aerodynamics", "Aerospace Components", "Computational Fluid Dynamics", "Finite Element Analysis", "Flexible Structures", "Flow Instability", "Flow Simulation", "Laminar To Turbulent Transitions", "Structural Response", "Implicit Large Eddy Simulation Computation", "Canonical Model Problem", "Flexible Flapping Wing Micro Air Vehicle", "Turbulent Flow Field", "Laminar Flow Field", "Transitional Flow Field", "SD 7003 Airfoil Section", "Dynamic Stall Vortex System", "Spanwise Instability Effects", "Aerodynamics Solver", "Nonlinear Finite Element Solver", "Flexible Membrane Wing", "Unsteady Fluid Structure Interaction", "Biomembranes", "Automotive Components", "Computational Modeling", "Aerodynamics", "Atmospheric Modeling", "Accuracy", "Mathematical Model" ], "authors": [], "idPrefix": "hpcmp-ugc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-06-01T00:00:00", "pubType": "proceedings", "pages": "73-80", "year": "2009", "issn": null, "isbn": "978-0-7695-3946-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3946a067", "articleId": "12OmNqAU6FG", "__typename": "AdjacentArticleType" }, "next": { "fno": "3946a081", "articleId": "12OmNxRF77L", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dodugc/2005/2496/0/24960128", "title": "Higher-Order Accurate Computations of Maneuvering Unmanned Combat Air Vehicle Configurations", "doi": null, "abstractUrl": "/proceedings-article/dodugc/2005/24960128/12OmNAWYKIc", "parentPublication": { "id": "proceedings/dodugc/2005/2496/0", "title": "2005 Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/aero/2011/7350/0/05747497", "title": "Towards energy efficiency in micro hovering air vehicles", "doi": null, "abstractUrl": "/proceedings-article/aero/2011/05747497/12OmNAkWviX", "parentPublication": { "id": "proceedings/aero/2011/7350/0", "title": "IEEE Aerospace Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2010/986/0/06017983", "title": "Computational Analysis for Air/Ship Integration: 1st Year Report", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2010/06017983/12OmNAnuTmf", "parentPublication": { "id": "proceedings/hpcmp-ugc/2010/986/0", "title": "2010 DoD High Performance Computing Modernization Program Users Group Conference (HPCMP-UGC 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2008/3515/0/3515a035", "title": "High-Fidelity Computations of Moving and Flexible Wing Sections with Application to Micro Air Vehicles", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2008/3515a035/12OmNqBbHz4", "parentPublication": { "id": "proceedings/hpcmp-ugc/2008/3515/0", "title": "HPCMP Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2009/3946/0/3946a037", "title": "Control of Boundary-Layer Separation for Lifting Surfaces", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2009/3946a037/12OmNrMZpvB", "parentPublication": { "id": "proceedings/hpcmp-ugc/2009/3946/0", "title": "HPCMP Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iros/1995/7108/2/71082230", "title": "Aerodynamic behavior of microstructures", "doi": null, "abstractUrl": "/proceedings-article/iros/1995/71082230/12OmNvA1h6T", "parentPublication": { "id": "proceedings/iros/1995/7108/2", "title": "Intelligent Robots and Systems, IEEE/RSJ International 
Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2010/986/0/06017995", "title": "High-Fidelity Computations for Flexible Micro Air Vehicle Applications", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2010/06017995/12OmNvAiSFV", "parentPublication": { "id": "proceedings/hpcmp-ugc/2010/986/0", "title": "2010 DoD High Performance Computing Modernization Program Users Group Conference (HPCMP-UGC 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcmp-ugc/2007/3088/0/30880033", "title": "Computations of a Maneuvering Unmanned Combat Air Vehicle Using a High-Order Overset Grid Method", "doi": null, "abstractUrl": "/proceedings-article/hpcmp-ugc/2007/30880033/12OmNvlPkzj", "parentPublication": { "id": "proceedings/hpcmp-ugc/2007/3088/0", "title": "HPCMP Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robio/2006/0570/0/04141906", "title": "From Natural Flyers to the Mechanical Realization of a Flapping Wing Micro Air Vehicle", "doi": null, "abstractUrl": "/proceedings-article/robio/2006/04141906/12OmNyywxFl", "parentPublication": { "id": "proceedings/robio/2006/0570/0", "title": "IEEE International Conference on Robotics and Biomimetics - ROBIO2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122071", "title": "Vortex Visualization in Ultra Low Reynolds Number Insect Flight", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122071/13rRUILc8f9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNviZlGi", "title": "High Performance Computing and Grid in Asia Pacific Region, International Conference on", "acronym": "hpcasia", "groupId": "1000321", "volume": "0", "displayVolume": "0", "year": "1997", "__typename": "ProceedingType" }, "article": { "id": "12OmNxzMnPS", "doi": "10.1109/HPC.1997.592184", "title": "Computational Method for the Prediction of Dynamic Response of Long-Span Bridges due to Unsteady Wind Load", "normalizedTitle": "Computational Method for the Prediction of Dynamic Response of Long-Span Bridges due to Unsteady Wind Load", "abstract": "A two-step computational method is proposed to predict the dynamic response of the long-span bridge structures by unsteady wind loads due to vortex-shedding. Computational fluid dynamics analysis using two-dimensional model of the bridge deck section are carried out to evaluate the unsteady wind loads on the bridge deck. Three-dimensional dynamic analysis of the bridge under unsteady wind loads are followed to investigate vortex-excited oscillations of the bridge. For an economical application of the computational method proposed, an approximate calculation method of unsteady wind loads is presented. Reasonable agreements are obtained between the predictions by the computational analyses and the existing wind tunnel measurements, and between the prediction by unsteady wind loads calculated through computational fluid dynamics and the prediction by approximated unsteady wind loads.", "abstracts": [ { "abstractType": "Regular", "content": "A two-step computational method is proposed to predict the dynamic response of the long-span bridge structures by unsteady wind loads due to vortex-shedding. Computational fluid dynamics analysis using two-dimensional model of the bridge deck section are carried out to evaluate the unsteady wind loads on the bridge deck. 
Three-dimensional dynamic analysis of the bridge under unsteady wind loads are followed to investigate vortex-excited oscillations of the bridge. For an economical application of the computational method proposed, an approximate calculation method of unsteady wind loads is presented. Reasonable agreements are obtained between the predictions by the computational analyses and the existing wind tunnel measurements, and between the prediction by unsteady wind loads calculated through computational fluid dynamics and the prediction by approximated unsteady wind loads.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A two-step computational method is proposed to predict the dynamic response of the long-span bridge structures by unsteady wind loads due to vortex-shedding. Computational fluid dynamics analysis using two-dimensional model of the bridge deck section are carried out to evaluate the unsteady wind loads on the bridge deck. Three-dimensional dynamic analysis of the bridge under unsteady wind loads are followed to investigate vortex-excited oscillations of the bridge. For an economical application of the computational method proposed, an approximate calculation method of unsteady wind loads is presented. 
Reasonable agreements are obtained between the predictions by the computational analyses and the existing wind tunnel measurements, and between the prediction by unsteady wind loads calculated through computational fluid dynamics and the prediction by approximated unsteady wind loads.", "fno": "79010419", "keywords": [ "Long Span Bridge", "Vortex Excited Oscillation", "Vortex Shedding", "Turbulent Flow", "Computational Method" ], "authors": [ { "affiliation": "Systems Engineering Research Institute", "fullName": "Jae Seok Lee", "givenName": "Jae Seok", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Systems Engineering Research Institute", "fullName": "Sangsan Lee", "givenName": "Sangsan", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "hpcasia", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1997-04-01T00:00:00", "pubType": "proceedings", "pages": "419", "year": "1997", "issn": null, "isbn": "0-8186-7901-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "79010413", "articleId": "12OmNy3Agp2", "__typename": "AdjacentArticleType" }, "next": { "fno": "79010425", "articleId": "12OmNzSh1am", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyaXPPN", "title": "2009 Second International Conference on Information and Computing Science", "acronym": "icic", "groupId": "1002818", "volume": "4", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNy4IEY3", "doi": "10.1109/ICIC.2009.401", "title": "Study on Numerical Simulation of Single-Phase Injection Device Flow Flied", "normalizedTitle": "Study on Numerical Simulation of Single-Phase Injection Device Flow Flied", "abstract": "The injection device is widely used in various industrial fields. The liquid-liquid single-phase flow flied of injection device was simulated with computational fluid dynamics (CFD) software FLUENT based on standard K-epsiv two-equation turbulent mode. The results showed that: the fluid movement disorder at the entrance of mixing chamber and is even more confusion when into the interior with the phenomenon of vortex separation and fluid corner separation.", "abstracts": [ { "abstractType": "Regular", "content": "The injection device is widely used in various industrial fields. The liquid-liquid single-phase flow flied of injection device was simulated with computational fluid dynamics (CFD) software FLUENT based on standard K-epsiv two-equation turbulent mode. The results showed that: the fluid movement disorder at the entrance of mixing chamber and is even more confusion when into the interior with the phenomenon of vortex separation and fluid corner separation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The injection device is widely used in various industrial fields. The liquid-liquid single-phase flow flied of injection device was simulated with computational fluid dynamics (CFD) software FLUENT based on standard K-epsiv two-equation turbulent mode. 
The results showed that: the fluid movement disorder at the entrance of mixing chamber and is even more confusion when into the interior with the phenomenon of vortex separation and fluid corner separation.", "fno": "3634d358", "keywords": [ "Boundary Layer Turbulence", "Computational Fluid Dynamics", "Jets", "Mixing", "Navier Stokes Equations", "Vortices", "Numerical Simulation", "Single Phase Injection Device Flow Field", "Liquid Liquid Single Phase Flow Flied", "Computational Fluid Dynamics", "FLUENT CFD Software", "Kappa Minus Epsilon Two Equation Turbulent Mode", "Fluid Movement Disorder", "Mixing Chamber Entrance", "Vortex Separation", "Fluid Corner Separation", "Numerical Simulation", "Equations", "Computational Fluid Dynamics", "Viscosity", "Chemical Engineering", "Chemical Technology", "Chemical Industry", "Fluid Flow", "Computational Modeling", "Software Standards", "Liquid Liquid Injection Device", "Computational Fluid Dynamics CFD", "Single Phase Flow Flied", "Numerical Simulation" ], "authors": [ { "affiliation": "Sch. of Chem. Eng., Hebei Univ. of Technol., Tianjin, China", "fullName": "Wen-yi Chen", "givenName": "Wen-yi", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Peking University", "fullName": "Yi-ran An", "givenName": "Yi-ran", "surname": "An", "__typename": "ArticleAuthorType" }, { "affiliation": "Tianjin University", "fullName": "Nan Jiang", "givenName": "Nan", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "Sch. of Chem. Eng., Hebei Univ. 
of Technol., Tianjin, China", "fullName": "Qing-hui Yuan", "givenName": "Qing-hui", "surname": "Yuan", "__typename": "ArticleAuthorType" } ], "idPrefix": "icic", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-05-01T00:00:00", "pubType": "proceedings", "pages": "358-361", "year": "2009", "issn": "2160-7443", "isbn": "978-0-7695-3634-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3634d296", "articleId": "12OmNy2Jtbd", "__typename": "AdjacentArticleType" }, "next": { "fno": "3634d300", "articleId": "12OmNzdoMvR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wcse/2009/3570/2/3570b395", "title": "Numerical Simulation of Field Flow within Hydrocyclone", "doi": null, "abstractUrl": "/proceedings-article/wcse/2009/3570b395/12OmNCykm83", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2015/7644/0/7644a275", "title": "CFD Numerical Analysis of the Interaction Between Wave and Pier", "doi": null, "abstractUrl": "/proceedings-article/icicta/2015/7644a275/12OmNrAdsx1", "parentPublication": { "id": "proceedings/icicta/2015/7644/0", "title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/1/4252a716", "title": "Modeling of Fuel Injection Process under Two-Phase Condition", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252a716/12OmNrkBwqW", "parentPublication": { "id": "proceedings/icoip/2010/4252/2", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ccie/2010/4026/1/4026a201", "title": "Study on the rheological property of the raw slurry", "doi": null, "abstractUrl": "/proceedings-article/ccie/2010/4026a201/12OmNwE9OCH", "parentPublication": { "id": "proceedings/ccie/2010/4026/1", "title": "Computing, Control and Industrial Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbeb/2012/4706/0/4706b070", "title": "Numerical Simulation of Descending Curves Sinusoidal Microchannel for Cell Separation System", "doi": null, "abstractUrl": "/proceedings-article/icbeb/2012/4706b070/12OmNwvDQuL", "parentPublication": { "id": "proceedings/icbeb/2012/4706/0", "title": "Biomedical Engineering and Biotechnology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbec/2016/2132/0/07459026", "title": "Characterization of a Shear Thinning Fluid System for Cardiovascular Medical Device Assessment", "doi": null, "abstractUrl": "/proceedings-article/sbec/2016/07459026/12OmNx57HR5", "parentPublication": { "id": "proceedings/sbec/2016/2132/0", "title": "2016 32nd Southern Biomedical Engineering Conference (SBEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifita/2009/3600/2/3600b749", "title": "Numerical Simulation and Visualization of Thermal and Flow Fields of MOCVD", "doi": null, "abstractUrl": "/proceedings-article/ifita/2009/3600b749/12OmNyqzLUP", "parentPublication": { "id": "proceedings/ifita/2009/3600/2", "title": "2009 International Forum on Information Technology and Applications (IFITA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2010/4270/0/4270a242", "title": "Numerical Study of Stokes' Second Flow Problem", "doi": null, "abstractUrl": "/proceedings-article/iccis/2010/4270a242/12OmNzTYBXT", "parentPublication": { "id": 
"proceedings/iccis/2010/4270/0", "title": "2010 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccbd/2022/5716/0/10080788", "title": "Numerical Simulation on Drag Reduction of Micro-grooved Surface", "doi": null, "abstractUrl": "/proceedings-article/iccbd/2022/10080788/1LSP5NEHXq0", "parentPublication": { "id": "proceedings/iccbd/2022/5716/0", "title": "2022 5th International Conference on Computing and Big Data (ICCBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cscc/2020/6503/0/650300a165", "title": "Pressure analysis of rectangular fluid filling", "doi": null, "abstractUrl": "/proceedings-article/cscc/2020/650300a165/1t2mRuVIpwI", "parentPublication": { "id": "proceedings/cscc/2020/6503/0", "title": "2020 24th International Conference on Circuits, Systems, Communications and Computers (CSCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }