data
dict
{ "proceeding": { "id": "12OmNwdbV00", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNvk7JWd", "doi": "10.1109/CVPR.2012.6247788", "title": "Markov Weight Fields for face sketch synthesis", "normalizedTitle": "Markov Weight Fields for face sketch synthesis", "abstract": "Great progress has been made in face sketch synthesis in recent years. State-of-the-art methods commonly apply a Markov Random Fields (MRF) model to select local sketch patches from a set of training data. Such methods, however, have two major drawbacks. Firstly, the MRF model used cannot synthesize new sketch patches. Secondly, the optimization problem in solving the MRF is NP-hard. In this paper, we propose a novel Markov Weight Fields (MWF) model that is capable of synthesizing new sketch patches. We formulate our model into a convex quadratic programming (QP) problem to which the optimal solution is guaranteed. Based on the Markov property of our model, we further propose a cascade decomposition method (CDM) for solving such a large scale QP problem efficiently. Experimental results on the CUHK face sketch database and celebrity photos show that our model outperforms the common MRF model used in other state-of-the-art methods.", "abstracts": [ { "abstractType": "Regular", "content": "Great progress has been made in face sketch synthesis in recent years. State-of-the-art methods commonly apply a Markov Random Fields (MRF) model to select local sketch patches from a set of training data. Such methods, however, have two major drawbacks. Firstly, the MRF model used cannot synthesize new sketch patches. Secondly, the optimization problem in solving the MRF is NP-hard. In this paper, we propose a novel Markov Weight Fields (MWF) model that is capable of synthesizing new sketch patches. 
We formulate our model into a convex quadratic programming (QP) problem to which the optimal solution is guaranteed. Based on the Markov property of our model, we further propose a cascade decomposition method (CDM) for solving such a large scale QP problem efficiently. Experimental results on the CUHK face sketch database and celebrity photos show that our model outperforms the common MRF model used in other state-of-the-art methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Great progress has been made in face sketch synthesis in recent years. State-of-the-art methods commonly apply a Markov Random Fields (MRF) model to select local sketch patches from a set of training data. Such methods, however, have two major drawbacks. Firstly, the MRF model used cannot synthesize new sketch patches. Secondly, the optimization problem in solving the MRF is NP-hard. In this paper, we propose a novel Markov Weight Fields (MWF) model that is capable of synthesizing new sketch patches. We formulate our model into a convex quadratic programming (QP) problem to which the optimal solution is guaranteed. Based on the Markov property of our model, we further propose a cascade decomposition method (CDM) for solving such a large scale QP problem efficiently. 
Experimental results on the CUHK face sketch database and celebrity photos show that our model outperforms the common MRF model used in other state-of-the-art methods.", "fno": "138P1C30", "keywords": [ "Quadratic Programming", "Computational Complexity", "Convex Programming", "Image Matching", "Markov Processes", "Celebrity Photos", "Markov Weight Fields Model", "Face Sketch Synthesis", "MRF", "Markov Random Fields Model", "Local Sketch Patches", "Optimization Problem", "NP Hard", "MWF", "Convex Quadratic Programming Problem", "Cascade Decomposition Method", "CDM", "CUHK Face Sketch Database", "Face", "Training Data", "Optimization", "Markov Processes", "Computational Modeling", "Vectors", "Databases" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Univ. of Hong Kong, Hong Kong, China", "fullName": "K. K. Wong", "givenName": "K. K.", "surname": "Wong", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Univ. of Hong Kong, Hong Kong, China", "fullName": "Zhanghui Kuang", "givenName": null, "surname": "Zhanghui Kuang", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Univ. 
of Hong Kong, Hong Kong, China", "fullName": "Hao Zhou", "givenName": null, "surname": "Hao Zhou", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-06-01T00:00:00", "pubType": "proceedings", "pages": "1091-1097", "year": "2012", "issn": "1063-6919", "isbn": "978-1-4673-1226-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "137P1C29", "articleId": "12OmNAIdBP6", "__typename": "AdjacentArticleType" }, "next": { "fno": "139P1C31", "articleId": "12OmNwbLVtg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2011/4541/0/4541a412", "title": "Local Regression Model for Automatic Face Sketch Generation", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a412/12OmNAq3hxY", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/2/114320680", "title": "Markov Face Models", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114320680/12OmNwFRpav", "parentPublication": { "id": "proceedings/iccv/2001/1143/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a082", "title": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a082/12OmNx0RIMA", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icpr/2004/2128/3/212830157", "title": "A Hybrid Face Recognition Method using Markov Random Fields", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212830157/12OmNz61dFD", "parentPublication": { "id": "proceedings/icpr/2004/2128/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c146", "title": "Face Sketch Synthesis via Sparse Representation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c146/12OmNzUgdi2", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1993/4120/0/00342457", "title": "Phase transitions and multi-scale Markov random fields: application to texture synthesis", "doi": null, "abstractUrl": "/proceedings-article/acssc/1993/00342457/12OmNzcxZ2v", "parentPublication": { "id": "proceedings/acssc/1993/4120/0", "title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/02/07434636", "title": "Graphical Representation for Heterogeneous Face Recognition", "doi": null, "abstractUrl": "/journal/tp/2017/02/07434636/13rRUwhHcS4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1991/08/i0819", "title": "Generating Connected Textured Fractal Patterns Using Markov Random Fields", "doi": null, "abstractUrl": "/journal/tp/1991/08/i0819/13rRUxASuqh", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tp/1995/04/i0391", "title": "Tree Approximations to Markov Random Fields", "doi": null, "abstractUrl": "/journal/tp/1995/04/i0391/13rRUxCitKg", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/11/ttp2009111955", "title": "Face Photo-Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqAU6sR", "title": "Image and Graphics, International Conference on", "acronym": "icig", "groupId": "1001790", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNx0RIMA", "doi": "10.1109/ICIG.2011.112", "title": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework", "normalizedTitle": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework", "abstract": "Sketch-photo synthesis is one of the important research issues of heterogeneous image transformation. Some available popular synthesis methods, like locally linear embedding (LLE), usually generate sketches or photos with lower definition and blurred details, which reduces the visual quality and the recognition rate across the heterogeneous images. In order to improve the quality of the synthesized images, a multi-dictionary sparse representation based face sketch-photo synthesis model is constructed. In the proposed model, LLE is used to estimate an initial sketch or photo, while the multi-dictionary sparse representation model is applied to generate the high frequency and detail information. Finally, by linear superimposing, the enhanced face sketch or photo can be obtained. Experimental results show that sketches and photos synthesized by the proposed method have higher definition and much richer detail information resulting in a higher face recognition rate between sketches and photos.", "abstracts": [ { "abstractType": "Regular", "content": "Sketch-photo synthesis is one of the important research issues of heterogeneous image transformation. Some available popular synthesis methods, like locally linear embedding (LLE), usually generate sketches or photos with lower definition and blurred details, which reduces the visual quality and the recognition rate across the heterogeneous images. 
In order to improve the quality of the synthesized images, a multi-dictionary sparse representation based face sketch-photo synthesis model is constructed. In the proposed model, LLE is used to estimate an initial sketch or photo, while the multi-dictionary sparse representation model is applied to generate the high frequency and detail information. Finally, by linear superimposing, the enhanced face sketch or photo can be obtained. Experimental results show that sketches and photos synthesized by the proposed method have higher definition and much richer detail information resulting in a higher face recognition rate between sketches and photos.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Sketch-photo synthesis is one of the important research issues of heterogeneous image transformation. Some available popular synthesis methods, like locally linear embedding (LLE), usually generate sketches or photos with lower definition and blurred details, which reduces the visual quality and the recognition rate across the heterogeneous images. In order to improve the quality of the synthesized images, a multi-dictionary sparse representation based face sketch-photo synthesis model is constructed. In the proposed model, LLE is used to estimate an initial sketch or photo, while the multi-dictionary sparse representation model is applied to generate the high frequency and detail information. Finally, by linear superimposing, the enhanced face sketch or photo can be obtained. 
Experimental results show that sketches and photos synthesized by the proposed method have higher definition and much richer detail information resulting in a higher face recognition rate between sketches and photos.", "fno": "4541a082", "keywords": [ "Face Recognition", "Multi Dictionary", "Sketch Photo Synthesis", "Sparse Representation" ], "authors": [ { "affiliation": null, "fullName": "Nannan Wang", "givenName": "Nannan", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xinbo Gao", "givenName": "Xinbo", "surname": "Gao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dacheng Tao", "givenName": "Dacheng", "surname": "Tao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xuelong Li", "givenName": "Xuelong", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "icig", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-08-01T00:00:00", "pubType": "proceedings", "pages": "82-87", "year": "2011", "issn": null, "isbn": "978-0-7695-4541-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4541a076", "articleId": "12OmNzAohW8", "__typename": "AdjacentArticleType" }, "next": { "fno": "4541a088", "articleId": "12OmNwc3wux", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2011/4541/0/4541a412", "title": "Local Regression Model for Automatic Face Sketch Generation", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a412/12OmNAq3hxY", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2018/2335/0/233501a083", "title": "High-Quality Facial Photo-Sketch Synthesis Using 
Multi-Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a083/12OmNBv2CeE", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460802", "title": "Smoothness-constrained face photo-sketch synthesis using sparse representation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460802/12OmNxE2mI9", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995324", "title": "Coupled information-theoretic encoding for face photo-sketch recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995324/12OmNxecS2L", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1996/7258/0/72580586", "title": "A Framework for Recognizing a Facial Image from a Police Sketch", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1996/72580586/12OmNyY4rwP", "parentPublication": { "id": "proceedings/cvpr/1996/7258/0", "title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c146", "title": "Face Sketch Synthesis via Sparse Representation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c146/12OmNzUgdi2", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tp/2009/11/ttp2009111955", "title": "Face Photo-Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a612", "title": "Attribute-Centered Loss for Soft-Biometrics Guided Face Sketch-Photo Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000a612/17D45XreC7p", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a944", "title": "Adversarial Open Domain Adaptation for Sketch-to-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a944/1B13AO3WYa4", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756563", "title": "Improving Face Sketch Recognition via Adversarial Sketch-Photo Transformation", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756563/1bzYpqF2pFK", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNC1GueH", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNxE2mI9", "doi": "", "title": "Smoothness-constrained face photo-sketch synthesis using sparse representation", "normalizedTitle": "Smoothness-constrained face photo-sketch synthesis using sparse representation", "abstract": "Face photo-sketch and sketch-photo synthesis have important usages in law enforcement. It is challenging to synthesize face sketches from photos because the drawing techniques and styles of artists' depictions are hard to be learned. To synthesize face photos from sketches is also hard due to its ill-posed nature. In order to avoid mosaic effects in the existed photo-sketch methods, we propose a smoothness-constrained photo-sketch synthesis method via sparse representation. The work is an extension of the previous work[1]. The method is modeled as the minimization of an energy function, a large scale convex optimization problem with l1-norm constraint. Since previous optimization methods are infeasible to solve our problem, we propose an iterative optimization approach, which decomposes the large scale optimization into a sequence of small scale optimizations and solve them iteratively to obtain the approximated optimal solution. The same synthesis strategy can be also used to synthesize photos from sketches. Experiments show its effectiveness.", "abstracts": [ { "abstractType": "Regular", "content": "Face photo-sketch and sketch-photo synthesis have important usages in law enforcement. It is challenging to synthesize face sketches from photos because the drawing techniques and styles of artists' depictions are hard to be learned. To synthesize face photos from sketches is also hard due to its ill-posed nature. 
In order to avoid mosaic effects in the existed photo-sketch methods, we propose a smoothness-constrained photo-sketch synthesis method via sparse representation. The work is an extension of the previous work[1]. The method is modeled as the minimization of an energy function, a large scale convex optimization problem with l1-norm constraint. Since previous optimization methods are infeasible to solve our problem, we propose an iterative optimization approach, which decomposes the large scale optimization into a sequence of small scale optimizations and solve them iteratively to obtain the approximated optimal solution. The same synthesis strategy can be also used to synthesize photos from sketches. Experiments show its effectiveness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Face photo-sketch and sketch-photo synthesis have important usages in law enforcement. It is challenging to synthesize face sketches from photos because the drawing techniques and styles of artists' depictions are hard to be learned. To synthesize face photos from sketches is also hard due to its ill-posed nature. In order to avoid mosaic effects in the existed photo-sketch methods, we propose a smoothness-constrained photo-sketch synthesis method via sparse representation. The work is an extension of the previous work[1]. The method is modeled as the minimization of an energy function, a large scale convex optimization problem with l1-norm constraint. Since previous optimization methods are infeasible to solve our problem, we propose an iterative optimization approach, which decomposes the large scale optimization into a sequence of small scale optimizations and solve them iteratively to obtain the approximated optimal solution. The same synthesis strategy can be also used to synthesize photos from sketches. 
Experiments show its effectiveness.", "fno": "06460802", "keywords": [ "Convex Programming", "Face Recognition", "Image Representation", "Image Segmentation", "Iterative Methods", "Minimisation", "Smoothness Constrained Face Photo Sketch Synthesis", "Sparse Representation", "Face Sketch Photo Synthesis", "Law Enforcement", "Drawing Techniques", "Artist Depictions", "Ill Posed Nature", "Mosaic Effects", "Energy Function Minimization", "Large Scale Convex Optimization Problem", "L 1 Norm Constraint", "Iterative Optimization Approach", "Approximated Optimal Solution", "Face", "Dictionaries", "Optimization", "Vectors", "Encoding", "Training", "Face Recognition" ], "authors": [ { "affiliation": "College of Information Science and Technology, Beijing Normal University, China", "fullName": "Liang Chang", "givenName": "Liang", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Software, Chinese Academy of Sciences, China", "fullName": "Xiaoming Deng", "givenName": "Xiaoming", "surname": "Deng", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Information Science and Technology, Beijing Normal University, China", "fullName": "Mingquan Zhou", "givenName": "Mingquan", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Information Science and Technology, Beijing Normal University, China", "fullName": "Fuqing Duan", "givenName": "Fuqing", "surname": "Duan", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Information Science and Technology, Beijing Normal University, China", "fullName": "Zhongke Wu", "givenName": "Zhongke", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "3025-3029", "year": "2012", "issn": "1051-4651", "isbn": "978-1-4673-2216-4", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06460801", "articleId": "12OmNwFicXw", "__typename": "AdjacentArticleType" }, "next": { "fno": "06460803", "articleId": "12OmNAtst9q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2011/4541/0/4541a412", "title": "Local Regression Model for Automatic Face Sketch Generation", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a412/12OmNAq3hxY", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2018/2335/0/233501a083", "title": "High-Quality Facial Photo-Sketch Synthesis Using Multi-Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a083/12OmNBv2CeE", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a082", "title": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a082/12OmNx0RIMA", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995324", "title": "Coupled information-theoretic encoding for face photo-sketch recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995324/12OmNxecS2L", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/cvprw/2012/1611/0/06239208", "title": "Data insufficiency in sketch versus photo face recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239208/12OmNxuXczC", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1996/7258/0/72580586", "title": "A Framework for Recognizing a Facial Image from a Police Sketch", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1996/72580586/12OmNyY4rwP", "parentPublication": { "id": "proceedings/cvpr/1996/7258/0", "title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/11/ttp2009111955", "title": "Face Photo-Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a612", "title": "Attribute-Centered Loss for Soft-Biometrics Guided Face Sketch-Photo Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000a612/17D45XreC7p", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a944", "title": "Adversarial Open Domain Adaptation for Sketch-to-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a944/1B13AO3WYa4", "parentPublication": { "id": 
"proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756563", "title": "Improving Face Sketch Recognition via Adversarial Sketch-Photo Transformation", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756563/1bzYpqF2pFK", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAg7k0k", "title": "2018 IEEE Winter Applications of Computer Vision Workshops (WACVW)", "acronym": "wacvw", "groupId": "1806264", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "12OmNySG3Qj", "doi": "10.1109/WACVW.2018.00006", "title": "Facial Attributes Guided Deep Sketch-to-Photo Synthesis", "normalizedTitle": "Facial Attributes Guided Deep Sketch-to-Photo Synthesis", "abstract": "Face sketch-photo synthesis is a critical application in law enforcement and digital entertainment industry. Despite the significant improvements in sketch-to-photo synthesis techniques, existing methods have still serious limitations in practice, such as the need for paired data in the training phase or having no control on enforcing facial attributes over the synthesized image. In this work, we present a new framework, which is a conditional version of Cycle-GAN, conditioned on facial attributes. The proposed network forces facial attributes, such as skin and hair color, on the synthesized photo and does not need a set of aligned face-sketch pairs during its training. We evaluate the proposed network by training on two real and synthetic sketch datasets. The hand-sketch images of the FERET dataset and the color face images from the WVU Multi-modal dataset are used as an unpaired input to the proposed conditional CycleGAN with the skin color as the controlled face attribute. For more attribute guided evaluation, a synthetic sketch dataset is created from the CelebA dataset and used to evaluate the performance of the network by forcing several desired facial attributes on the synthesized faces.", "abstracts": [ { "abstractType": "Regular", "content": "Face sketch-photo synthesis is a critical application in law enforcement and digital entertainment industry. 
Despite the significant improvements in sketch-to-photo synthesis techniques, existing methods have still serious limitations in practice, such as the need for paired data in the training phase or having no control on enforcing facial attributes over the synthesized image. In this work, we present a new framework, which is a conditional version of Cycle-GAN, conditioned on facial attributes. The proposed network forces facial attributes, such as skin and hair color, on the synthesized photo and does not need a set of aligned face-sketch pairs during its training. We evaluate the proposed network by training on two real and synthetic sketch datasets. The hand-sketch images of the FERET dataset and the color face images from the WVU Multi-modal dataset are used as an unpaired input to the proposed conditional CycleGAN with the skin color as the controlled face attribute. For more attribute guided evaluation, a synthetic sketch dataset is created from the CelebA dataset and used to evaluate the performance of the network by forcing several desired facial attributes on the synthesized faces.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Face sketch-photo synthesis is a critical application in law enforcement and digital entertainment industry. Despite the significant improvements in sketch-to-photo synthesis techniques, existing methods have still serious limitations in practice, such as the need for paired data in the training phase or having no control on enforcing facial attributes over the synthesized image. In this work, we present a new framework, which is a conditional version of Cycle-GAN, conditioned on facial attributes. The proposed network forces facial attributes, such as skin and hair color, on the synthesized photo and does not need a set of aligned face-sketch pairs during its training. We evaluate the proposed network by training on two real and synthetic sketch datasets. 
The hand-sketch images of the FERET dataset and the color face images from the WVU Multi-modal dataset are used as an unpaired input to the proposed conditional CycleGAN with the skin color as the controlled face attribute. For more attribute guided evaluation, a synthetic sketch dataset is created from the CelebA dataset and used to evaluate the performance of the network by forcing several desired facial attributes on the synthesized faces.", "fno": "518801a001", "keywords": [ "Face Recognition", "Image Colour Analysis", "Neural Nets", "Color Face Images", "Controlled Face Attribute", "Attribute Guided Evaluation", "Synthetic Sketch Dataset", "Face Sketch Photo Synthesis", "Law Enforcement", "Digital Entertainment Industry", "Paired Data", "Training Phase", "Aligned Face Sketch Pairs", "Hand Sketch Images", "Facial Attributes Guided Deep Sketch To Photo Synthesis", "WVU Multimodal Dataset", "Cycle GAN", "FERET Dataset", "Skin Color", "Generative Adversarial Networks", "Face", "Training", "Generators", "Gallium Nitride", "Hidden Markov Models", "Linear Programming", "Law Enforcement" ], "authors": [ { "affiliation": null, "fullName": "Hadi Kazemi", "givenName": "Hadi", "surname": "Kazemi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mehdi Iranmanesh", "givenName": "Mehdi", "surname": "Iranmanesh", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ali Dabouei", "givenName": "Ali", "surname": "Dabouei", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sobhan Soleymani", "givenName": "Sobhan", "surname": "Soleymani", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nasser M. Nasrabadi", "givenName": "Nasser", "surname": "M. 
Nasrabadi", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2018", "issn": null, "isbn": "978-1-5386-5188-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "518801z007", "articleId": "12OmNAtK4nx", "__typename": "AdjacentArticleType" }, "next": { "fno": "518801a009", "articleId": "12OmNvzJG9c", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2018/2335/0/233501a083", "title": "High-Quality Facial Photo-Sketch Synthesis Using Multi-Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a083/12OmNBv2CeE", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/11/ttp2009111955", "title": "Face Photo-Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000j136", "title": "Sketch-a-Classifier: Sketch-Based Photo Classifier Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000j136/17D45VObpQ8", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a033", "title": "Facial 
Expression Editing in Face Sketch Using Shape Space Theory", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a033/17D45XacGkk", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a612", "title": "Attribute-Centered Loss for Soft-Biometrics Guided Face Sketch-Photo Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000a612/17D45XreC7p", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500a406", "title": "Recovering Faces From Portraits with Auxiliary Facial Attributes", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a406/18j8OtlT0eA", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a944", "title": "Adversarial Open Domain Adaptation for Sketch-to-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a944/1B13AO3WYa4", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756586", "title": "Attribute-Guided Sketch Generation", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756586/1bzYnWp7xzG", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture 
Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150741", "title": "Quality Guided Sketch-to-Photo Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150741/1lPH7Mwrspq", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a191", "title": "Residual Enhancement Network for Realistic Face Sketch-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a191/1xqyStBl8qc", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyKJiaV", "title": "Pattern Recognition, International Conference on", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNzUgdi2", "doi": "10.1109/ICPR.2010.526", "title": "Face Sketch Synthesis via Sparse Representation", "normalizedTitle": "Face Sketch Synthesis via Sparse Representation", "abstract": "Face sketch synthesis with a photo is challenging due to that the psychological mechanism of sketch generation is difficult to be expressed precisely by rules. Current learning-based sketch synthesis methods concentrate on learning the rules by optimizing cost functions with low-level image features. In this paper, a new face sketch synthesis method is presented, which is inspired by recent advances in sparse signal representation and neuroscience that human brain probably perceives images using high-level features which are sparse. Sparse representations are desired in sketch synthesis due to that sparseness can adaptively selects the most relevant samples which give best representations of the input photo. We assume that the face photo patch and its corresponding sketch patch follow the same sparse representation. In the feature extraction, we select succinct high-level features by using the sparse coding technique, and in the sketch synthesis process each sketch patch is synthesized with respect to high-level features by solving an Z_$l_1$_Z-norm optimization. Experiments have been given on CUHK database to show that our method can resemble the true sketch fairly well.", "abstracts": [ { "abstractType": "Regular", "content": "Face sketch synthesis with a photo is challenging due to that the psychological mechanism of sketch generation is difficult to be expressed precisely by rules. Current learning-based sketch synthesis methods concentrate on learning the rules by optimizing cost functions with low-level image features. 
In this paper, a new face sketch synthesis method is presented, which is inspired by recent advances in sparse signal representation and neuroscience that human brain probably perceives images using high-level features which are sparse. Sparse representations are desired in sketch synthesis due to that sparseness can adaptively selects the most relevant samples which give best representations of the input photo. We assume that the face photo patch and its corresponding sketch patch follow the same sparse representation. In the feature extraction, we select succinct high-level features by using the sparse coding technique, and in the sketch synthesis process each sketch patch is synthesized with respect to high-level features by solving an $l_1$-norm optimization. Experiments have been given on CUHK database to show that our method can resemble the true sketch fairly well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Face sketch synthesis with a photo is challenging due to that the psychological mechanism of sketch generation is difficult to be expressed precisely by rules. Current learning-based sketch synthesis methods concentrate on learning the rules by optimizing cost functions with low-level image features. In this paper, a new face sketch synthesis method is presented, which is inspired by recent advances in sparse signal representation and neuroscience that human brain probably perceives images using high-level features which are sparse. Sparse representations are desired in sketch synthesis due to that sparseness can adaptively selects the most relevant samples which give best representations of the input photo. We assume that the face photo patch and its corresponding sketch patch follow the same sparse representation. 
In the feature extraction, we select succinct high-level features by using the sparse coding technique, and in the sketch synthesis process each sketch patch is synthesized with respect to high-level features by solving an --norm optimization. Experiments have been given on CUHK database to show that our method can resemble the true sketch fairly well.", "fno": "4109c146", "keywords": [ "Face Sketch", "Image Synthesis", "Sparse Representation" ], "authors": [ { "affiliation": null, "fullName": "Liang Chang", "givenName": "Liang", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mingquan Zhou", "givenName": "Mingquan", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yanjun Han", "givenName": "Yanjun", "surname": "Han", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xiaoming Deng", "givenName": "Xiaoming", "surname": "Deng", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-08-01T00:00:00", "pubType": "proceedings", "pages": "2146-2149", "year": "2010", "issn": "1051-4651", "isbn": "978-0-7695-4109-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4109c142", "articleId": "12OmNqC2v2L", "__typename": "AdjacentArticleType" }, "next": { "fno": "4109c150", "articleId": "12OmNyYDDIN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2011/4541/0/4541a412", "title": "Local Regression Model for Automatic Face Sketch Generation", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a412/12OmNAq3hxY", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a152", "title": "3D Facial Surface and Texture Synthesis Using 2D Landmarks from a Single Face Sketch", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a152/12OmNC2xhCk", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2003/1950/1/195010687", "title": "Face Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2003/195010687/12OmNCcbE0u", "parentPublication": { "id": "proceedings/iccv/2003/1950/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601a485", "title": "Face Sketch Synthesis with Style Transfer Using Pyramid Column Feature", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601a485/12OmNqzu6RJ", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/138P1C30", "title": "Markov Weight Fields for face sketch synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/138P1C30/12OmNvk7JWd", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a082", "title": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a082/12OmNx0RIMA", 
"parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460802", "title": "Smoothness-constrained face photo-sketch synthesis using sparse representation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460802/12OmNxE2mI9", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/12/08440073", "title": "Deep Neural Representation Guided Face Sketch Synthesis", "doi": null, "abstractUrl": "/journal/tg/2019/12/08440073/13rRUxAASTg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/11/ttp2009111955", "title": "Face Photo-Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a191", "title": "Residual Enhancement Network for Realistic Face Sketch-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a191/1xqyStBl8qc", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1IHotVZum6Q", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "9956007", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1IHqemvhsPe", "doi": "10.1109/ICPR56361.2022.9956661", "title": "MOST-Net: A Memory Oriented Style Transfer Network for Face Sketch Synthesis", "normalizedTitle": "MOST-Net: A Memory Oriented Style Transfer Network for Face Sketch Synthesis", "abstract": "Face sketch synthesis has been widely used in multimedia entertainment and law enforcement. Despite the recent developments in deep neural networks, accurate and realistic face sketch synthesis is still a challenging task due to the diversity and complexity of human faces. Current image-to-image translation-based face sketch synthesis frequently encounters over-fitting problems when it comes to small-scale datasets. To tackle this problem, we present an end-to-end Memory Oriented Style Transfer Network (MOST-Net) for face sketch synthesis which can produce high-fidelity sketches with limited data. Specifically, an external self-supervised dynamic memory module is introduced to capture the domain alignment knowledge in the long term. In this way, our proposed model could obtain the domain-transfer ability by establishing the durable relationship between faces and corresponding sketches on the feature level. Furthermore, we design a novel Memory Refinement Loss (MR Loss) for feature alignment in the memory module, which enhances the accuracy of memory slots in an unsupervised manner. Extensive experiments on the CUFS and the CUFSF datasets show that our MOST-Net achieves state-of-the-art performance, especially in terms of the Structural Similarity Index(SSIM).", "abstracts": [ { "abstractType": "Regular", "content": "Face sketch synthesis has been widely used in multimedia entertainment and law enforcement. 
Despite the recent developments in deep neural networks, accurate and realistic face sketch synthesis is still a challenging task due to the diversity and complexity of human faces. Current image-to-image translation-based face sketch synthesis frequently encounters over-fitting problems when it comes to small-scale datasets. To tackle this problem, we present an end-to-end Memory Oriented Style Transfer Network (MOST-Net) for face sketch synthesis which can produce high-fidelity sketches with limited data. Specifically, an external self-supervised dynamic memory module is introduced to capture the domain alignment knowledge in the long term. In this way, our proposed model could obtain the domain-transfer ability by establishing the durable relationship between faces and corresponding sketches on the feature level. Furthermore, we design a novel Memory Refinement Loss (MR Loss) for feature alignment in the memory module, which enhances the accuracy of memory slots in an unsupervised manner. Extensive experiments on the CUFS and the CUFSF datasets show that our MOST-Net achieves state-of-the-art performance, especially in terms of the Structural Similarity Index(SSIM).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Face sketch synthesis has been widely used in multimedia entertainment and law enforcement. Despite the recent developments in deep neural networks, accurate and realistic face sketch synthesis is still a challenging task due to the diversity and complexity of human faces. Current image-to-image translation-based face sketch synthesis frequently encounters over-fitting problems when it comes to small-scale datasets. To tackle this problem, we present an end-to-end Memory Oriented Style Transfer Network (MOST-Net) for face sketch synthesis which can produce high-fidelity sketches with limited data. Specifically, an external self-supervised dynamic memory module is introduced to capture the domain alignment knowledge in the long term. 
In this way, our proposed model could obtain the domain-transfer ability by establishing the durable relationship between faces and corresponding sketches on the feature level. Furthermore, we design a novel Memory Refinement Loss (MR Loss) for feature alignment in the memory module, which enhances the accuracy of memory slots in an unsupervised manner. Extensive experiments on the CUFS and the CUFSF datasets show that our MOST-Net achieves state-of-the-art performance, especially in terms of the Structural Similarity Index(SSIM).", "fno": "09956661", "keywords": [ "Deep Learning Artificial Intelligence", "Face Recognition", "Feature Extraction", "Realistic Images", "Unsupervised Learning", "Current Image To Image Translation Based Face Sketch Synthesis", "Deep Neural Networks", "End To End Memory Oriented Style Transfer Network", "High Fidelity Sketches", "Human Faces", "Law Enforcement", "Memory Refinement Loss", "MOST Net", "Multimedia Entertainment", "Realistic Face Sketch Synthesis", "Self Supervised Dynamic Memory Module", "Structural Similarity Index", "Training", "Deep Learning", "Dictionaries", "Law Enforcement", "Face Recognition", "Neural Networks", "Entertainment Industry" ], "authors": [ { "affiliation": "Chinese Academy of Sciences,CRIPAC & NLPR, Institute of Automation,Beijing,China,100190", "fullName": "Fan Ji", "givenName": "Fan", "surname": "Ji", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,CRIPAC & NLPR, Institute of Automation,Beijing,China,100190", "fullName": "Muyi Sun", "givenName": "Muyi", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing University of Posts and Telecommunications,School of AI/Auto,Beijing,China,100876", "fullName": "Xingqun Qi", "givenName": "Xingqun", "surname": "Qi", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,CRIPAC & NLPR, Institute of Automation,Beijing,China,100190", "fullName": "Qi Li", "givenName": "Qi", 
"surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,CRIPAC & NLPR, Institute of Automation,Beijing,China,100190", "fullName": "Zhenan Sun", "givenName": "Zhenan", "surname": "Sun", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "733-739", "year": "2022", "issn": null, "isbn": "978-1-6654-9062-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09956555", "articleId": "1IHphnxTXk4", "__typename": "AdjacentArticleType" }, "next": { "fno": "09956152", "articleId": "1IHoxlLFnTa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2018/2335/0/233501a083", "title": "High-Quality Facial Photo-Sketch Synthesis Using Multi-Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a083/12OmNBv2CeE", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2003/1950/1/195010687", "title": "Face Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2003/195010687/12OmNCcbE0u", "parentPublication": { "id": "proceedings/iccv/2003/1950/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601a485", "title": "Face Sketch Synthesis with Style Transfer Using Pyramid Column Feature", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601a485/12OmNqzu6RJ", "parentPublication": { "id": 
"proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a082", "title": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a082/12OmNx0RIMA", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460802", "title": "Smoothness-constrained face photo-sketch synthesis using sparse representation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460802/12OmNxE2mI9", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995324", "title": "Coupled information-theoretic encoding for face photo-sketch recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995324/12OmNxecS2L", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2018/5188/0/518801a001", "title": "Facial Attributes Guided Deep Sketch-to-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2018/518801a001/12OmNySG3Qj", "parentPublication": { "id": "proceedings/wacvw/2018/5188/0", "title": "2018 IEEE Winter Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733a619", "title": "Transfer Learning Based Evolutionary Algorithm for Composite Face 
Sketch Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733a619/12OmNyyO8Le", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756563", "title": "Improving Face Sketch Recognition via Adversarial Sketch-Photo Transformation", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756563/1bzYpqF2pFK", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1xqyG9WHggU", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "acronym": "icceai", "groupId": "1843184", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1xqyStBl8qc", "doi": "10.1109/ICCEAI52939.2021.00037", "title": "Residual Enhancement Network for Realistic Face Sketch-Photo Synthesis", "normalizedTitle": "Residual Enhancement Network for Realistic Face Sketch-Photo Synthesis", "abstract": "Face sketch-photo synthesis is a significant challenge task in computer vision area, due to the blurred facial details and color distortion produced by the existing approaches. In this paper, we propose a realistic face sketch-photo synthesis method based on residual enhancement network. In the network, a residual enhancement module is constructed and embedded in U-Net to improve the feature representation capability of the deep network. In addition, a detail loss and a perception loss are adopted to constrain the synthesized image has abundant detail and realistic photo style. Experimental results on multiple face sketch datasets indicate that the proposed method obtains superior performance than the state-of-the-art methods, both in terms of visual perception and objective evaluations.", "abstracts": [ { "abstractType": "Regular", "content": "Face sketch-photo synthesis is a significant challenge task in computer vision area, due to the blurred facial details and color distortion produced by the existing approaches. In this paper, we propose a realistic face sketch-photo synthesis method based on residual enhancement network. In the network, a residual enhancement module is constructed and embedded in U-Net to improve the feature representation capability of the deep network. In addition, a detail loss and a perception loss are adopted to constrain the synthesized image has abundant detail and realistic photo style. 
Experimental results on multiple face sketch datasets indicate that the proposed method obtains superior performance than the state-of-the-art methods, both in terms of visual perception and objective evaluations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Face sketch-photo synthesis is a significant challenge task in computer vision area, due to the blurred facial details and color distortion produced by the existing approaches. In this paper, we propose a realistic face sketch-photo synthesis method based on residual enhancement network. In the network, a residual enhancement module is constructed and embedded in U-Net to improve the feature representation capability of the deep network. In addition, a detail loss and a perception loss are adopted to constrain the synthesized image has abundant detail and realistic photo style. Experimental results on multiple face sketch datasets indicate that the proposed method obtains superior performance than the state-of-the-art methods, both in terms of visual perception and objective evaluations.", "fno": "396000a191", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Deep Learning Artificial Intelligence", "Face Recognition", "Image Colour Analysis", "Image Enhancement", "Image Representation", "Visual Perception", "Multiple Face Sketch Datasets", "Residual Enhancement Network", "Computer Vision Area", "Blurred Facial Details", "Color Distortion", "Realistic Face Sketch Photo Synthesis Method", "Residual Enhancement Module", "Feature Representation Capability", "Deep Network", "Realistic Photo Style", "U Net", "Perception Loss", "Detail Loss", "Synthesized Image", "Visual Perception", "Computer Vision", "Image Color Analysis", "Face Recognition", "Distortion", "Task Analysis", "Artificial Intelligence", "Visual Perception", "Face Sketch Photo Synthesis", "Residual Enhancement Network", "Detail Loss", "Perception Loss" ], "authors": [ { "affiliation": "Jiangxi University of Finance and 
Economics,School of Software and Internet of Things Engineering,Nanchang,China", "fullName": "Weiguo Wan", "givenName": "Weiguo", "surname": "Wan", "__typename": "ArticleAuthorType" }, { "affiliation": "Jiangxi University of Finance and Economics,School of Information Technology,Nanchang,China", "fullName": "Yong Yang", "givenName": "Yong", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Jiangxi University of Finance and Economics,School of Information Technology,Nanchang,China", "fullName": "Wei Tu", "givenName": "Wei", "surname": "Tu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icceai", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-08-01T00:00:00", "pubType": "proceedings", "pages": "191-195", "year": "2021", "issn": null, "isbn": "978-1-6654-3960-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "396000a187", "articleId": "1xqyOUo8PyU", "__typename": "AdjacentArticleType" }, "next": { "fno": "396000a196", "articleId": "1xqyHWd0uWI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2018/2335/0/233501a083", "title": "High-Quality Facial Photo-Sketch Synthesis Using Multi-Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a083/12OmNBv2CeE", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a082", "title": "Face Sketch-Photo Synthesis under Multi-dictionary Sparse Representation Framework", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a082/12OmNx0RIMA", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and 
Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460802", "title": "Smoothness-constrained face photo-sketch synthesis using sparse representation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460802/12OmNxE2mI9", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06239208", "title": "Data insufficiency in sketch versus photo face recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239208/12OmNxuXczC", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2018/5188/0/518801a001", "title": "Facial Attributes Guided Deep Sketch-to-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2018/518801a001/12OmNySG3Qj", "parentPublication": { "id": "proceedings/wacvw/2018/5188/0", "title": "2018 IEEE Winter Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/11/ttp2009111955", "title": "Face Photo-Sketch Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2009/11/ttp2009111955/13rRUxOveaX", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a612", "title": "Attribute-Centered Loss for Soft-Biometrics Guided Face Sketch-Photo Recognition", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2018/610000a612/17D45XreC7p", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a944", "title": "Adversarial Open Domain Adaptation for Sketch-to-Photo Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a944/1B13AO3WYa4", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756563", "title": "Improving Face Sketch Recognition via Adversarial Sketch-Photo Transformation", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756563/1bzYpqF2pFK", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412917", "title": "Siamese Graph Convolution Network for Face Sketch Recognition: An application using Graph structure for face photo-sketch recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412917/1tmjHxsSvKM", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45W9KVIS", "doi": "10.1109/CVPR.2018.00219", "title": "LayoutNet: Reconstructing the 3D Room Layout from a Single RGB Image", "normalizedTitle": "LayoutNet: Reconstructing the 3D Room Layout from a Single RGB Image", "abstract": "We propose an algorithm to predict room layout from a single image that generalizes across panoramas and perspective images, cuboid layouts and more general layouts (e.g. \"L\"-shape room). Our method operates directly on the panoramic image, rather than decomposing into perspective images as do recent works. Our network architecture is similar to that of RoomNet [15], but we show improvements due to aligning the image based on vanishing points, predicting multiple layout elements (corners, boundaries, size and translation), and fitting a constrained Manhattan layout to the resulting predictions. Our method compares well in speed and accuracy to other existing work on panoramas, achieves among the best accuracy for perspective images, and can handle both cuboid-shaped and more general Manhattan layouts.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an algorithm to predict room layout from a single image that generalizes across panoramas and perspective images, cuboid layouts and more general layouts (e.g. \"L\"-shape room). Our method operates directly on the panoramic image, rather than decomposing into perspective images as do recent works. Our network architecture is similar to that of RoomNet [15], but we show improvements due to aligning the image based on vanishing points, predicting multiple layout elements (corners, boundaries, size and translation), and fitting a constrained Manhattan layout to the resulting predictions. 
Our method compares well in speed and accuracy to other existing work on panoramas, achieves among the best accuracy for perspective images, and can handle both cuboid-shaped and more general Manhattan layouts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an algorithm to predict room layout from a single image that generalizes across panoramas and perspective images, cuboid layouts and more general layouts (e.g. \"L\"-shape room). Our method operates directly on the panoramic image, rather than decomposing into perspective images as do recent works. Our network architecture is similar to that of RoomNet [15], but we show improvements due to aligning the image based on vanishing points, predicting multiple layout elements (corners, boundaries, size and translation), and fitting a constrained Manhattan layout to the resulting predictions. Our method compares well in speed and accuracy to other existing work on panoramas, achieves among the best accuracy for perspective images, and can handle both cuboid-shaped and more general Manhattan layouts.", "fno": "642000c051", "keywords": [ "Image Colour Analysis", "Image Reconstruction", "Perspective Images", "Cuboid Shaped Manhattan Layouts", "Room Layout", "Single RGB Image", "Single Image", "Cuboid Layouts", "General Layouts", "Panoramic Image", "Multiple Layout Elements", "Constrained Manhattan Layout", "Layout", "Three Dimensional Displays", "Convolution", "Two Dimensional Displays", "Training", "Cameras", "Estimation" ], "authors": [ { "affiliation": null, "fullName": "Chuhang Zou", "givenName": "Chuhang", "surname": "Zou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Alex Colburn", "givenName": "Alex", "surname": "Colburn", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qi Shan", "givenName": "Qi", "surname": "Shan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Derek Hoiem", "givenName": "Derek", 
"surname": "Hoiem", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "2051-2059", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000c041", "articleId": "17D45XvMcaA", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000c060", "articleId": "17D45WK5Anj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032e875", "title": "RoomNet: End-to-End Room Layout Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032e875/12OmNyNQSEd", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a519", "title": "Room Layout Estimation with Object and Material Attributes Information Using a Spherical Camera", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a519/12OmNyv7meo", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a235", "title": "Learning to Reconstruct 3D Non-Cuboid Room Layout from a Single RGB Image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a235/1B13jeDLlG8", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvprw/2022/8739/0/873900f188", "title": "3D Room Layout Recovery Generalizing across Manhattan and Non-Manhattan Worlds", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900f188/1G565Rf5zFu", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i606", "title": "PSMNet: Position-aware Stereo Merging Network for Room Layout Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i606/1H1ly5AiV0c", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a875", "title": "Robust Planar Optimization for General 3D Room Layout Estimation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a875/1J7W8qbNT56", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300d358", "title": "DuLa-Net: A Dual-Projection Network for Estimating Room Layouts From a Single RGB Panorama", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300d358/1gys6O5ZjFu", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300f673", "title": "Floorplan-Jigsaw: Jointly Estimating Scene Layout and Aligning Partial Scans", 
"doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300f673/1hQqwdZMWL6", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900d697", "title": "Scaled 360 layouts: Revisiting non-central panoramas", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900d697/1yXsGU0lKXC", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900c133", "title": "Zillow Indoor Dataset: Annotated Floor Plans With 360° Panoramas and 3D Room Layouts", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900c133/1yeKk438NjO", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G55WEFExd6", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G565Rf5zFu", "doi": "10.1109/CVPRW56347.2022.00567", "title": "3D Room Layout Recovery Generalizing across Manhattan and Non-Manhattan Worlds", "normalizedTitle": "3D Room Layout Recovery Generalizing across Manhattan and Non-Manhattan Worlds", "abstract": "Recent 3D room layout recovery approaches mostly concentrate on Manhattan layouts, where the vertical walls are orthogonal with respect to each other, even though there are many rooms with non-Manhattan layouts in the real world. This paper presents a room layout recovery method generalizing across Manhattan and non-Manhattan worlds. Without introducing additional supervision, we extend current Manhattan layout recovery methods by predicting an extra surface normal feature, which is further used for an adaptive post-processing to reconstruct layouts of arbitrary shapes. Experimental results show that our method has a great improvement on non-Manhattan layouts while being capable of generalizing across Manhattan and non-Manhattan layouts.", "abstracts": [ { "abstractType": "Regular", "content": "Recent 3D room layout recovery approaches mostly concentrate on Manhattan layouts, where the vertical walls are orthogonal with respect to each other, even though there are many rooms with non-Manhattan layouts in the real world. This paper presents a room layout recovery method generalizing across Manhattan and non-Manhattan worlds. Without introducing additional supervision, we extend current Manhattan layout recovery methods by predicting an extra surface normal feature, which is further used for an adaptive post-processing to reconstruct layouts of arbitrary shapes. 
Experimental results show that our method has a great improvement on non-Manhattan layouts while being capable of generalizing across Manhattan and non-Manhattan layouts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent 3D room layout recovery approaches mostly concentrate on Manhattan layouts, where the vertical walls are orthogonal with respect to each other, even though there are many rooms with non-Manhattan layouts in the real world. This paper presents a room layout recovery method generalizing across Manhattan and non-Manhattan worlds. Without introducing additional supervision, we extend current Manhattan layout recovery methods by predicting an extra surface normal feature, which is further used for an adaptive post-processing to reconstruct layouts of arbitrary shapes. Experimental results show that our method has a great improvement on non-Manhattan layouts while being capable of generalizing across Manhattan and non-Manhattan layouts.", "fno": "873900f188", "keywords": [ "Cameras", "Document Image Processing", "Feature Extraction", "Image Reconstruction", "Image Sequences", "Pose Estimation", "Recovery Generalizing", "Non Manhattan Worlds", "Recent 3 D Room Layout Recovery", "Manhattan Layouts", "Non Manhattan Layouts", "Room Layout Recovery Method", "Current Manhattan Layout Recovery Methods", "Surface Reconstruction", "Computer Vision", "Three Dimensional Displays", "Shape", "Conferences", "Layout", "Transformers" ], "authors": [ { "affiliation": "Ricoh Software Research Center (Beijing) Co., Ltd.,Beijing,China", "fullName": "Haijing Jia", "givenName": "Haijing", "surname": "Jia", "__typename": "ArticleAuthorType" }, { "affiliation": "Ricoh Software Research Center (Beijing) Co., Ltd.,Beijing,China", "fullName": "Hong Yi", "givenName": "Hong", "surname": "Yi", "__typename": "ArticleAuthorType" }, { "affiliation": "Ricoh Company, Ltd.,Japan", "fullName": "Hirochika Fujiki", "givenName": "Hirochika", "surname": "Fujiki", 
"__typename": "ArticleAuthorType" }, { "affiliation": "Ricoh Software Research Center (Beijing) Co., Ltd.,Beijing,China", "fullName": "Hengzhi Zhang", "givenName": "Hengzhi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Ricoh Software Research Center (Beijing) Co., Ltd.,Beijing,China", "fullName": "Wei Wang", "givenName": "Wei", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Ricoh Company, Ltd.,Japan", "fullName": "Makoto Odamaki", "givenName": "Makoto", "surname": "Odamaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "5188-5197", "year": "2022", "issn": null, "isbn": "978-1-6654-8739-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "873900f178", "articleId": "1G572IHNJf2", "__typename": "AdjacentArticleType" }, "next": { "fno": "873900f198", "articleId": "1G4F6LZzMic", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2009/3992/0/05206867", "title": "Manhattan-world stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206867/12OmNBC8ABC", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a231", "title": "A Distance-Based Technique for Non-Manhattan Layout Analysis", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a231/12OmNqI04ML", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iccad/1989/1986/0/00077007", "title": "The Stickizer: a layout to symbolic converter", "doi": null, "abstractUrl": "/proceedings-article/iccad/1989/00077007/12OmNvpNIsu", "parentPublication": { "id": "proceedings/iccad/1989/1986/0", "title": "1989 IEEE International Conference on Computer-Aided Design", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/01/07839295", "title": "The Manhattan Frame Model—Manhattan World Inference in the Space of Surface Normals", "doi": null, "abstractUrl": "/journal/tp/2018/01/07839295/13rRUynZ5pm", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c051", "title": "LayoutNet: Reconstructing the 3D Room Layout from a Single RGB Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c051/17D45W9KVIS", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a875", "title": "Robust Planar Optimization for General 3D Room Layout Estimation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a875/1J7W8qbNT56", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300d358", "title": "DuLa-Net: A Dual-Projection Network for Estimating Room Layouts From a Single RGB Panorama", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300d358/1gys6O5ZjFu", 
"parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300f673", "title": "Floorplan-Jigsaw: Jointly Estimating Scene Layout and Aligning Partial Scans", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300f673/1hQqwdZMWL6", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900d697", "title": "Scaled 360 layouts: Revisiting non-central panoramas", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900d697/1yXsGU0lKXC", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900c133", "title": "Zillow Indoor Dataset: Annotated Floor Plans With 360° Panoramas and 3D Room Layouts", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900c133/1yeKk438NjO", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1iU88XYyI", "doi": "10.1109/CVPR52688.2022.01648", "title": "Learning Object Context for Novel-view Scene Layout Generation", "normalizedTitle": "Learning Object Context for Novel-view Scene Layout Generation", "abstract": "Novel-view prediction of a scene has many applications. Existing works mainly focus on generating novel-view images via pixel-wise prediction in the image space, often resulting in severe ghosting and blurry artifacts. In this paper, we make the first attempt to explore novel-view prediction in the layout space, and introduce the new problem of novel-view scene layout generation. Given a single scene layout and the camera transformation as inputs, our goal is to generate a plausible scene layout for a specified viewpoint. Such a problem is challenging as it involves accurate understanding of the 3D geometry and semantics of the scene from as little as a single 2D scene layout. To tackle this challenging problem, we propose a deep model to capture contextualized object representation by explicitly modeling the object context transformation in the scene. The contextualized object representation is essential in generating geometrically and semantically consistent scene layouts of different views. Experiments show that our model outperforms several strong baselines on many indoor and outdoor scenes, both qualitatively and quantitatively. We also show that our model enables a wide range of applications, including novel-view image synthesis, novel-view image editing, and amodal object estimation.", "abstracts": [ { "abstractType": "Regular", "content": "Novel-view prediction of a scene has many applications. 
Existing works mainly focus on generating novel-view images via pixel-wise prediction in the image space, often resulting in severe ghosting and blurry artifacts. In this paper, we make the first attempt to explore novel-view prediction in the layout space, and introduce the new problem of novel-view scene layout generation. Given a single scene layout and the camera transformation as inputs, our goal is to generate a plausible scene layout for a specified viewpoint. Such a problem is challenging as it involves accurate understanding of the 3D geometry and semantics of the scene from as little as a single 2D scene layout. To tackle this challenging problem, we propose a deep model to capture contextualized object representation by explicitly modeling the object context transformation in the scene. The contextualized object representation is essential in generating geometrically and semantically consistent scene layouts of different views. Experiments show that our model outperforms several strong baselines on many indoor and outdoor scenes, both qualitatively and quantitatively. We also show that our model enables a wide range of applications, including novel-view image synthesis, novel-view image editing, and amodal object estimation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Novel-view prediction of a scene has many applications. Existing works mainly focus on generating novel-view images via pixel-wise prediction in the image space, often resulting in severe ghosting and blurry artifacts. In this paper, we make the first attempt to explore novel-view prediction in the layout space, and introduce the new problem of novel-view scene layout generation. Given a single scene layout and the camera transformation as inputs, our goal is to generate a plausible scene layout for a specified viewpoint. 
Such a problem is challenging as it involves accurate understanding of the 3D geometry and semantics of the scene from as little as a single 2D scene layout. To tackle this challenging problem, we propose a deep model to capture contextualized object representation by explicitly modeling the object context transformation in the scene. The contextualized object representation is essential in generating geometrically and semantically consistent scene layouts of different views. Experiments show that our model outperforms several strong baselines on many indoor and outdoor scenes, both qualitatively and quantitatively. We also show that our model enables a wide range of applications, including novel-view image synthesis, novel-view image editing, and amodal object estimation.", "fno": "694600q6969", "keywords": [ "Cameras", "Geometry", "Image Colour Analysis", "Image Reconstruction", "Image Representation", "Learning Artificial Intelligence", "Object Detection", "Rendering Computer Graphics", "Indoor Scenes", "Outdoor Scenes", "Novel View Image Synthesis", "Novel View Image Editing", "Novel View Prediction", "Novel View Images", "Pixel Wise Prediction", "Image Space", "Severe Ghosting", "Blurry Artifacts", "Layout Space", "Novel View Scene Layout Generation", "Single Scene Layout", "Plausible Scene Layout", "Contextualized Object Representation", "Object Context Transformation", "Consistent Scene Layouts", "Three Dimensional Displays", "Computational Modeling", "Layout", "Semantics", "Predictive Models", "Cameras", "Probabilistic Logic" ], "authors": [ { "affiliation": "School of Computer Science and Technology, Xidian University", "fullName": "Xiaotian Qiao", "givenName": "Xiaotian", "surname": "Qiao", "__typename": "ArticleAuthorType" }, { "affiliation": "City University of Hong Kong,Department of Computer Science", "fullName": "Gerhard P. 
Hancke", "givenName": "Gerhard P.", "surname": "Hancke", "__typename": "ArticleAuthorType" }, { "affiliation": "City University of Hong Kong,Department of Computer Science", "fullName": "Rynson W.H. Lau", "givenName": "Rynson W.H.", "surname": "Lau", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "16969-16978", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "694600q6959", "articleId": "1H0LjZwryvK", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600q6979", "articleId": "1H1i1hkAan6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2014/7000/1/7000a139", "title": "Detailed 3D Model Driven Single View Scene Understanding", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a139/12OmNAXxXid", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118a232", "title": "Incorporating Scene Context and Object Layout into Appearance Modeling", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a232/12OmNviHKkE", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200m2612", "title": "DeepPanoContext: Panoramic 3D Scene Understanding with Holistic Scene Context Graph and Relation-based Optimization", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2021/281200m2612/1BmI8yzArnO", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300f673", "title": "Floorplan-Jigsaw: Jointly Estimating Scene Layout and Aligning Partial Scans", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300f673/1hQqwdZMWL6", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c780", "title": "Mono-SF: Multi-View Geometry Meets Single-View Depth for Monocular Scene Flow Estimation of Dynamic Traffic Scenes", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c780/1hVli1JrdAY", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093519", "title": "Mono Lay out: Amodal scene layout from a single image", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093519/1jPblbvdOlW", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d753", "title": "End-to-End Optimization of Scene Layout", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d753/1m3ooUhHlVC", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900p5531", "title": "Projecting Your View Attentively: Monocular Road Scene Layout Estimation via Cross-view Transformation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900p5531/1yeJi81tkHu", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a557", "title": "Cross-View Cross-Scene Multi-View Crowd Counting", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a557/1yeKfeZSsXC", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900q6433", "title": "Layout-Guided Novel View Synthesis from a Single Indoor Panorama", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900q6433/1yeLX1b3EQg", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1mlrOB1Wo", "doi": "10.1109/CVPR52688.2022.00763", "title": "Interactive Image Synthesis with Panoptic Layout Generation", "normalizedTitle": "Interactive Image Synthesis with Panoptic Layout Generation", "abstract": "Interactive image synthesis from user-guided input is a challenging task when users wish to control the scene structure of a generated image with ease. Although remarkable progress has been made on layout-based image synthesis approaches, existing methods require high-precision inputs such as accurately placed bounding boxes, which might be constantly violated in an interactive setting. When placement of bounding boxes is subject to perturbation, layout-based models suffer from “missing regions” in the constructed semantic layouts and hence undesirable artifacts in the generated images. In this work, we propose Panoptic Layout Generative Adversarial Network (PLGAN) to address this challenge. The PLGAN employs panoptic theory which distinguishes object categories between “stuff” with amorphous boundaries and “things” with well-defined shapes, such that stuff and instance layouts are constructed through separate branches and later fused into panoptic layouts. In particular, the stuff layouts can take amorphous shapes and fill up the missing regions left out by the instance layouts. We experimentally compare our PLGAN with state-of-the-art layout-based models on the COCO-Stuff, Visual Genome, and Landscape datasets. The advantages of PLGAN are not only visually demonstrated but quantitatively verified in terms of inception score, Fréchet inception distance, classification accuracy score, and coverage. 
The code is available at https://github.com/wb-finalking/PLGAN.", "abstracts": [ { "abstractType": "Regular", "content": "Interactive image synthesis from user-guided input is a challenging task when users wish to control the scene structure of a generated image with ease. Although remarkable progress has been made on layout-based image synthesis approaches, existing methods require high-precision inputs such as accurately placed bounding boxes, which might be constantly violated in an interactive setting. When placement of bounding boxes is subject to perturbation, layout-based models suffer from “missing regions” in the constructed semantic layouts and hence undesirable artifacts in the generated images. In this work, we propose Panoptic Layout Generative Adversarial Network (PLGAN) to address this challenge. The PLGAN employs panoptic theory which distinguishes object categories between “stuff” with amorphous boundaries and “things” with well-defined shapes, such that stuff and instance layouts are constructed through separate branches and later fused into panoptic layouts. In particular, the stuff layouts can take amorphous shapes and fill up the missing regions left out by the instance layouts. We experimentally compare our PLGAN with state-of-the-art layout-based models on the COCO-Stuff, Visual Genome, and Landscape datasets. The advantages of PLGAN are not only visually demonstrated but quantitatively verified in terms of inception score, Fréchet inception distance, classification accuracy score, and coverage. The code is available at https://github.com/wb-finalking/PLGAN.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Interactive image synthesis from user-guided input is a challenging task when users wish to control the scene structure of a generated image with ease. 
Although remarkable progress has been made on layout-based image synthesis approaches, existing methods require high-precision inputs such as accurately placed bounding boxes, which might be constantly violated in an interactive setting. When placement of bounding boxes is subject to perturbation, layout-based models suffer from “missing regions” in the constructed semantic layouts and hence undesirable artifacts in the generated images. In this work, we propose Panoptic Layout Generative Adversarial Network (PLGAN) to address this challenge. The PLGAN employs panoptic theory which distinguishes object categories between “stuff” with amorphous boundaries and “things” with well-defined shapes, such that stuff and instance layouts are constructed through separate branches and later fused into panoptic layouts. In particular, the stuff layouts can take amorphous shapes and fill up the missing regions left out by the instance layouts. We experimentally compare our PLGAN with state-of-the-art layout-based models on the COCO-Stuff, Visual Genome, and Landscape datasets. The advantages of PLGAN are not only visually demonstrated but quantitatively verified in terms of inception score, Fréchet inception distance, classification accuracy score, and coverage. 
The code is available at https://github.com/wb-finalking/PLGAN.", "fno": "694600h773", "keywords": [ "Computational Geometry", "Computer Graphics", "Data Visualisation", "Feature Extraction", "Genomics", "Image Classification", "Image Colour Analysis", "Image Motion Analysis", "Image Reconstruction", "Image Representation", "Image Retrieval", "Image Segmentation", "Image Texture", "Statistical Analysis", "Visual Databases", "Interactive Image Synthesis", "Panoptic Layout Generation", "User Guided Input", "Layout Based Image Synthesis Approaches", "High Precision Inputs", "Accurately Placed Bounding Boxes", "Interactive Setting", "Missing Regions", "Constructed Semantic Layouts", "Generative Adversarial Network", "PLGAN", "Panoptic Theory", "Instance Layouts", "Panoptic Layouts", "Stuff Layouts", "State Of The Art Layout Based Models", "Visualization", "Image Synthesis", "Shape", "Perturbation Methods", "Layout", "Semantics", "Genomics" ], "authors": [ { "affiliation": "Huawei Technologies", "fullName": "Bo Wang", "givenName": "Bo", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Technologies", "fullName": "Tao Wu", "givenName": "Tao", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Technologies", "fullName": "Minfeng Zhu", "givenName": "Minfeng", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Technologies", "fullName": "Peng Du", "givenName": "Peng", "surname": "Du", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "7773-7782", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1mloFj0s0", "name": "pcvpr202269460-09880303s1-mm_694600h773.zip", "size": "1.35 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880303s1-mm_694600h773.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600h764", "articleId": "1H1mMvyIwaQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600h783", "articleId": "1H0N7lmNc1q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/arvlsid/1995/7047/0/70470185", "title": "HAL: heuristic algorithms for layout synthesis", "doi": null, "abstractUrl": "/proceedings-article/arvlsid/1995/70470185/12OmNyRxFp4", "parentPublication": { "id": "proceedings/arvlsid/1995/7047/0", "title": "Advanced Research in VLSI, Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200n3799", "title": "Image Synthesis from Layout with Locality-Aware Mask Adaption", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200n3799/1BmGJmzmBTq", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600c426", "title": "Aesthetic Text Logo Synthesis via Content-aware Layout Inferring", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600c426/1H1hPvHfvNe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j894", "title": "LayoutVAE: Stochastic Scene Layout Generation From a Label Set", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j894/1hQqjMNDJny", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on 
Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300k0530", "title": "Image Synthesis From Reconfigurable Layout and Style", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300k0530/1hVlpxVSLMA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800k0717", "title": "Learning Instance Occlusion for Panoptic Segmentation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800k0717/1m3nrw02SRy", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a235", "title": "Interactive and Scalable Layout Synthesis with Design Templates", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a235/1qpzCVbvokM", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09427066", "title": "Learning Layout and Style Reconfigurable GANs for Controllable Image Synthesis", "doi": null, "abstractUrl": "/journal/tp/2022/09/09427066/1tuvzMfndhS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900p5044", "title": "Context-Aware Layout to Image Generation with Enhanced Object Appearance", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2021/450900p5044/1yeJYCrbxPa", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900d731", "title": "LayoutTransformer: Scene Layout Generation with Conceptual and Spatial Diversity", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900d731/1yeKJqXSUh2", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hQqkMnK9Zm", "doi": "10.1109/ICCV.2019.00135", "title": "Layout-Induced Video Representation for Recognizing Agent-in-Place Actions", "normalizedTitle": "Layout-Induced Video Representation for Recognizing Agent-in-Place Actions", "abstract": "We address scene layout modeling for recognizing agent-in-place actions, which are actions associated with agents who perform them and the places where they occur, in the context of outdoor home surveillance. We introduce a novel representation to model the geometry and topology of scene layouts so that a network can generalize from the layouts observed in the training scenes to unseen scenes in the test set. This Layout-Induced Video Representation (LIVR) abstracts away low-level appearance variance and encodes geometric and topological relationships of places to explicitly model scene layout. LIVR partitions the semantic features of a scene into different places to force the network to learn generic place-based feature descriptions which are independent of specific scene layouts; then, LIVR dynamically aggregates features based on connectivities of places in each specific scene to model its layout. We introduce a new Agent-in-Place Action (APA) dataset to show that our method allows neural network models to generalize significantly better to unseen scenes.", "abstracts": [ { "abstractType": "Regular", "content": "We address scene layout modeling for recognizing agent-in-place actions, which are actions associated with agents who perform them and the places where they occur, in the context of outdoor home surveillance. 
We introduce a novel representation to model the geometry and topology of scene layouts so that a network can generalize from the layouts observed in the training scenes to unseen scenes in the test set. This Layout-Induced Video Representation (LIVR) abstracts away low-level appearance variance and encodes geometric and topological relationships of places to explicitly model scene layout. LIVR partitions the semantic features of a scene into different places to force the network to learn generic place-based feature descriptions which are independent of specific scene layouts; then, LIVR dynamically aggregates features based on connectivities of places in each specific scene to model its layout. We introduce a new Agent-in-Place Action (APA) dataset to show that our method allows neural network models to generalize significantly better to unseen scenes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We address scene layout modeling for recognizing agent-in-place actions, which are actions associated with agents who perform them and the places where they occur, in the context of outdoor home surveillance. We introduce a novel representation to model the geometry and topology of scene layouts so that a network can generalize from the layouts observed in the training scenes to unseen scenes in the test set. This Layout-Induced Video Representation (LIVR) abstracts away low-level appearance variance and encodes geometric and topological relationships of places to explicitly model scene layout. LIVR partitions the semantic features of a scene into different places to force the network to learn generic place-based feature descriptions which are independent of specific scene layouts; then, LIVR dynamically aggregates features based on connectivities of places in each specific scene to model its layout. 
We introduce a new Agent-in-Place Action (APA) dataset to show that our method allows neural network models to generalize significantly better to unseen scenes.", "fno": "480300b262", "keywords": [ "Feature Extraction", "Image Representation", "Learning Artificial Intelligence", "Neural Nets", "Video Signal Processing", "Scene Layout Modeling", "Outdoor Home Surveillance", "Training Scenes", "Unseen Scenes", "LIVR", "Low Level Appearance Variance", "Geometric Relationships", "Topological Relationships", "Generic Place Based Feature Descriptions", "Specific Scene Layouts", "Neural Network Models", "Layout Induced Video Representation", "Agent In Place Action Recognition", "Agent In Place Action Dataset", "Feature Extraction", "Layout", "Semantics", "Surveillance", "Transforms", "Three Dimensional Displays", "Aggregates" ], "authors": [ { "affiliation": "Waymo LLC.", "fullName": "Ruichi Yu", "givenName": "Ruichi", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Comcast", "fullName": "Hongcheng Wang", "givenName": "Hongcheng", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "DeepMind. Mountain View", "fullName": "Ang Li", "givenName": "Ang", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland. 
College Park", "fullName": "Jingxiao Zheng", "givenName": "Jingxiao", "surname": "Zheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Vlad Morariu", "givenName": "Vlad", "surname": "Morariu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Larry Davis", "givenName": "Larry", "surname": "Davis", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "1262-1272", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "480300b252", "articleId": "1hQqjJBvIAw", "__typename": "AdjacentArticleType" }, "next": { "fno": "480300b273", "articleId": "1hVlSszR9sc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2017/5738/0/08031595", "title": "Visualizing the uncertainty induced by graph layout algorithms", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2017/08031595/12OmNs0C9DQ", "parentPublication": { "id": "proceedings/pacificvis/2017/5738/0", "title": "2017 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206557", "title": "Actions in context", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206557/12OmNviZlws", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a984", "title": "LayoutTransformer: Layout Generation and Completion with Self-attention", "doi": 
null, "abstractUrl": "/proceedings-article/iccv/2021/281200a984/1BmF1IskDcs", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g670", "title": "Generative Layout Modeling using Constraint Graphs", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g670/1BmIILGrL1u", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600q6969", "title": "Learning Object Context for Novel-view Scene Layout Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600q6969/1H1iU88XYyI", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b771", "title": "Hierarchy Denoising Recursive Autoencoders for 3D Scene Layout Prediction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b771/1gyrweLj4EE", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j894", "title": "LayoutVAE: Stochastic Scene Layout Generation From a Label Set", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j894/1hQqjMNDJny", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300f673", "title": "Floorplan-Jigsaw: Jointly Estimating Scene Layout and Aligning Partial Scans", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300f673/1hQqwdZMWL6", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09128027", "title": "Motion Planning for Convertible Indoor Scene Layout Design", "doi": null, "abstractUrl": "/journal/tg/2021/12/09128027/1l3unTAaNuE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d753", "title": "End-to-End Optimization of Scene Layout", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d753/1m3ooUhHlVC", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3ooUhHlVC", "doi": "10.1109/CVPR42600.2020.00381", "title": "End-to-End Optimization of Scene Layout", "normalizedTitle": "End-to-End Optimization of Scene Layout", "abstract": "We propose an end-to-end variational generative model for scene layout synthesis conditioned on scene graphs. Unlike unconditional scene layout generation, we use scene graphs as an abstract but general representation to guide the synthesis of diverse scene layouts that satisfy relationships included in the scene graph. This gives rise to more flexible control over the synthesis process, allowing various forms of inputs such as scene layouts extracted from sentences or inferred from a single color image. Using our conditional layout synthesizer, we can generate various layouts that share the same structure of the input example. In addition to this conditional generation design, we also integrate a differentiable rendering module that enables layout refinement using only 2D projections of the scene. Given a depth and a semantics map, the differentiable rendering module enables optimizing over the synthesized layout to fit the given input in an analysis-by-synthesis fashion. Experiments suggest that our model achieves higher accuracy and diversity in conditional scene synthesis and allows exemplar-based scene generation from various input forms.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an end-to-end variational generative model for scene layout synthesis conditioned on scene graphs. Unlike unconditional scene layout generation, we use scene graphs as an abstract but general representation to guide the synthesis of diverse scene layouts that satisfy relationships included in the scene graph. 
This gives rise to more flexible control over the synthesis process, allowing various forms of inputs such as scene layouts extracted from sentences or inferred from a single color image. Using our conditional layout synthesizer, we can generate various layouts that share the same structure of the input example. In addition to this conditional generation design, we also integrate a differentiable rendering module that enables layout refinement using only 2D projections of the scene. Given a depth and a semantics map, the differentiable rendering module enables optimizing over the synthesized layout to fit the given input in an analysis-by-synthesis fashion. Experiments suggest that our model achieves higher accuracy and diversity in conditional scene synthesis and allows exemplar-based scene generation from various input forms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an end-to-end variational generative model for scene layout synthesis conditioned on scene graphs. Unlike unconditional scene layout generation, we use scene graphs as an abstract but general representation to guide the synthesis of diverse scene layouts that satisfy relationships included in the scene graph. This gives rise to more flexible control over the synthesis process, allowing various forms of inputs such as scene layouts extracted from sentences or inferred from a single color image. Using our conditional layout synthesizer, we can generate various layouts that share the same structure of the input example. In addition to this conditional generation design, we also integrate a differentiable rendering module that enables layout refinement using only 2D projections of the scene. Given a depth and a semantics map, the differentiable rendering module enables optimizing over the synthesized layout to fit the given input in an analysis-by-synthesis fashion. 
Experiments suggest that our model achieves higher accuracy and diversity in conditional scene synthesis and allows exemplar-based scene generation from various input forms.", "fno": "716800d753", "keywords": [ "Graph Theory", "Image Colour Analysis", "Neural Nets", "Optimisation", "Rendering Computer Graphics", "Scene Graph", "Conditional Layout Synthesizer", "Conditional Generation Design", "Differentiable Rendering Module", "Layout Refinement", "Analysis By Synthesis Fashion", "Conditional Scene Synthesis", "End To End Optimization", "End To End Variational Generative Model", "Scene Layout Synthesis", "Unconditional Scene Layout Generation", "Single Color Image", "Exemplar Based Scene Generation", "Layout", "Three Dimensional Displays", "Semantics", "Decoding", "Rendering Computer Graphics", "Solid Modeling", "Training" ], "authors": [ { "affiliation": "Carnegie Mellon University", "fullName": "Andrew Luo", "givenName": "Andrew", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": "Massachusetts Institute of Technology", "fullName": "Zhoutong Zhang", "givenName": "Zhoutong", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Stanford University", "fullName": "Jiajun Wu", "givenName": "Jiajun", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Massachusetts Institute of Technology", "fullName": "Joshua B. 
Tenenbaum", "givenName": "Joshua B.", "surname": "Tenenbaum", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "3753-3762", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800d743", "articleId": "1m3o2oONVsY", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800d763", "articleId": "1m3oqK2iVIA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isvlsid/2014/3765/0/3765a350", "title": "Simultaneous Two-Dimensional Cell Layout Compaction Using MILP with ASTRAN", "doi": null, "abstractUrl": "/proceedings-article/isvlsid/2014/3765a350/12OmNwvDQwa", "parentPublication": { "id": "proceedings/isvlsid/2014/3765/0", "title": "2014 IEEE Computer Society Annual Symposium on VLSI (ISVLSI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g670", "title": "Generative Layout Modeling using Constraint Graphs", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g670/1BmIILGrL1u", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600q6969", "title": "Learning Object Context for Novel-view Scene Layout Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600q6969/1H1iU88XYyI", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600b039", "title": "Towards End-to-End Unified Scene Text Detection and Layout Analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600b039/1H1n1YQockU", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/07/08948239", "title": "LayoutGAN: Synthesizing Graphic Layouts With Vector-Wireframe Adversarial Networks", "doi": null, "abstractUrl": "/journal/tp/2021/07/08948239/1geNB7KG1eE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j894", "title": "LayoutVAE: Stochastic Scene Layout Generation From a Label Set", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j894/1hQqjMNDJny", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09128027", "title": "Motion Planning for Convertible Indoor Scene Layout Design", "doi": null, "abstractUrl": "/journal/tg/2021/12/09128027/1l3unTAaNuE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900n3799", "title": "Monte Carlo Scene Search for 3D Scene Understanding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900n3799/1yeJ3a5CJhe", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern 
Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900j338", "title": "Semantic Palette: Guiding Scene Generation with Class Proportions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900j338/1yeJbBelsbe", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900d731", "title": "LayoutTransformer: Scene Layout Generation with Conceptual and Spatial Diversity", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900d731/1yeKJqXSUh2", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qpzz6dhLLq", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qpzCVbvokM", "doi": "10.1109/AIVR50618.2020.00049", "title": "Interactive and Scalable Layout Synthesis with Design Templates", "normalizedTitle": "Interactive and Scalable Layout Synthesis with Design Templates", "abstract": "The design of virtual and real spaces is a complex task, as is evidenced by the large number of professionals offering their services. Researchers proposed multiple computational methods that aim to alleviate such complexity. Unfortunately, most methods for layout synthesis are not directly applicable to non-professional consumers, because of usability challenges in terms of computation, user input, and scalability. Hence, we propose a novel layout synthesis system based on design templates. Design templates define geometrical rules for creating rooms, according to the room type and furniture function. With such templates, our system allows a customizable user experience, and is computationally fast while remaining scalable. We demonstrate our method with several example layouts, focusing on both small and large rooms.", "abstracts": [ { "abstractType": "Regular", "content": "The design of virtual and real spaces is a complex task, as is evidenced by the large number of professionals offering their services. Researchers proposed multiple computational methods that aim to alleviate such complexity. Unfortunately, most methods for layout synthesis are not directly applicable to non-professional consumers, because of usability challenges in terms of computation, user input, and scalability. Hence, we propose a novel layout synthesis system based on design templates. 
Design templates define geometrical rules for creating rooms, according to the room type and furniture function. With such templates, our system allows a customizable user experience, and is computationally fast while remaining scalable. We demonstrate our method with several example layouts, focusing on both small and large rooms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The design of virtual and real spaces is a complex task, as is evidenced by the large number of professionals offering their services. Researchers proposed multiple computational methods that aim to alleviate such complexity. Unfortunately, most methods for layout synthesis are not directly applicable to non-professional consumers, because of usability challenges in terms of computation, user input, and scalability. Hence, we propose a novel layout synthesis system based on design templates. Design templates define geometrical rules for creating rooms, according to the room type and furniture function. With such templates, our system allows a customizable user experience, and is computationally fast while remaining scalable. 
We demonstrate our method with several example layouts, focusing on both small and large rooms.", "fno": "746300a235", "keywords": [ "Computational Geometry", "Design", "Solid Modelling", "User Interfaces", "Virtual Reality", "Room Type", "Furniture Function", "Design Templates", "Interactive Layout Synthesis", "Scalable Layout Synthesis", "Virtual Space Design", "Real Space Design", "Geometrical Rules", "3 D Scene Modeling", "Furniture Arrangements", "Layout", "TV", "Visualization", "Virtual Reality", "Three Dimensional Displays", "Shape", "User Experience", "Interactive Layout Synthesis", "3 D Scene Modeling", "Automatic Content Creation" ], "authors": [ { "affiliation": "New Jersey Institute of Technology,Newark,NJ", "fullName": "Hameedullah Farooki", "givenName": "Hameedullah", "surname": "Farooki", "__typename": "ArticleAuthorType" }, { "affiliation": "Wayfair,Boston,MA", "fullName": "Esra Ataer-Cansizoglu", "givenName": "Esra", "surname": "Ataer-Cansizoglu", "__typename": "ArticleAuthorType" }, { "affiliation": "Wayfair,Boston,MA", "fullName": "Jae-Woo Choi", "givenName": "Jae-Woo", "surname": "Choi", "__typename": "ArticleAuthorType" }, { "affiliation": "New Jersey Institute of Technology,Newark,NJ", "fullName": "Tomer Weiss", "givenName": "Tomer", "surname": "Weiss", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "235-238", "year": "2020", "issn": null, "isbn": "978-1-7281-7463-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "746300a231", "articleId": "1qpzzyPVUYM", "__typename": "AdjacentArticleType" }, "next": { "fno": "746300a239", "articleId": "1qpzADQ7pAY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/date/2009/3781/0/05090670", "title": "Analog layout synthesis - Recent advances in topological approaches", "doi": null, "abstractUrl": "/proceedings-article/date/2009/05090670/12OmNvvc5O1", "parentPublication": { "id": "proceedings/date/2009/3781/0", "title": "2009 Design, Automation & Test in Europe Conference & Exhibition (DATE'09)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733a041", "title": "Automated Layout Synthesis and Visualization from Images of Interior or Exterior Spaces", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733a041/12OmNyfdOKF", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/idt/2014/8200/0/07038592", "title": "Multi-device layout templates for nanometer analog design", "doi": null, "abstractUrl": "/proceedings-article/idt/2014/07038592/12OmNyz5JVe", "parentPublication": { "id": "proceedings/idt/2014/8200/0", "title": "2014 9th International Design & Test Symposium (IDT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ewdts/2010/9555/0/ewdts109036Intel", "title": "Between standard cells and transistors: Layout templates for Regular Fabrics", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2010/ewdts109036Intel/12OmNzw8j8T", "parentPublication": { "id": "proceedings/ewdts/2010/9555/0", "title": "East-West Design & Test Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/12/08443151", "title": "Fast and Scalable Position-Based Layout Synthesis", "doi": null, "abstractUrl": "/journal/tg/2019/12/08443151/13rRUxlgxOr", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer 
Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200k0913", "title": "3D-FRONT: 3D Furnished Rooms with layOuts and semaNTics", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200k0913/1BmEzQDqGek", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h773", "title": "Interactive Image Synthesis with Panoptic Layout Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h773/1H1mlrOB1Wo", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a839", "title": "Layout Aware Inpainting for Automated Furniture Removal in Indoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a839/1J7W82UYOVa", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvris/2019/5050/0/505000a031", "title": "Rationality Evaluation of Art Furniture Layout Based on Scene Simulation", "doi": null, "abstractUrl": "/proceedings-article/icvris/2019/505000a031/1fHk7oNyImY", "parentPublication": { "id": "proceedings/icvris/2019/5050/0", "title": "2019 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a109", "title": "Automatic Furniture Layout Based on 
Functional Area Division", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a109/1fHklquet0s", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwtn3tj", "title": "2018 International Conference on Information Networking (ICOIN)", "acronym": "icoin", "groupId": "1000363", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "12OmNvm6VII", "doi": "10.1109/ICOIN.2018.8343074", "title": "Document level polarity classification with attention gated recurrent unit", "normalizedTitle": "Document level polarity classification with attention gated recurrent unit", "abstract": "Reviews can be categorized into two extreme polarities, that is, positive or negative. These reviews from different consumers on a product or service can help a new consumer to make a good decision. Document level sentiment classification aims to understand user generated content or opinion towards certain products or services. In this paper, we propose a recurrent neural network model in classifying positive and negative reviews using gated recurrent unit and attention mechanism. Effectiveness of our proposed model is evaluated using Yelp Review dataset obtained from Yelp Dataset Challenge. Experimental results show that our proposed model can outperform existing models for document level sentiment classification.", "abstracts": [ { "abstractType": "Regular", "content": "Reviews can be categorized into two extreme polarities, that is, positive or negative. These reviews from different consumers on a product or service can help a new consumer to make a good decision. Document level sentiment classification aims to understand user generated content or opinion towards certain products or services. In this paper, we propose a recurrent neural network model in classifying positive and negative reviews using gated recurrent unit and attention mechanism. Effectiveness of our proposed model is evaluated using Yelp Review dataset obtained from Yelp Dataset Challenge. 
Experimental results show that our proposed model can outperform existing models for document level sentiment classification.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Reviews can be categorized into two extreme polarities, that is, positive or negative. These reviews from different consumers on a product or service can help a new consumer to make a good decision. Document level sentiment classification aims to understand user generated content or opinion towards certain products or services. In this paper, we propose a recurrent neural network model in classifying positive and negative reviews using gated recurrent unit and attention mechanism. Effectiveness of our proposed model is evaluated using Yelp Review dataset obtained from Yelp Dataset Challenge. Experimental results show that our proposed model can outperform existing models for document level sentiment classification.", "fno": "08343074", "keywords": [ "Logic Gates", "Training", "Semantics", "Logistics", "Mathematical Model", "Recurrent Neural Networks", "Testing", "Machine Learning", "Polarity Classification", "Gated Recurrent Unit", "Attention Mechanism" ], "authors": [ { "affiliation": "Lee Kong Chian Faculty of Engineering and Science, Universiti Tunku Abdul Rahman, Selangor, Malaysia", "fullName": "Hoon-Keng Poon", "givenName": "Hoon-Keng", "surname": "Poon", "__typename": "ArticleAuthorType" }, { "affiliation": "Lee Kong Chian Faculty of Engineering and Science, Universiti Tunku Abdul Rahman, Selangor, Malaysia", "fullName": "Wun-She Yap", "givenName": "Wun-She", "surname": "Yap", "__typename": "ArticleAuthorType" }, { "affiliation": "Lee Kong Chian Faculty of Engineering and Science, Universiti Tunku Abdul Rahman, Selangor, Malaysia", "fullName": "Yee-Kai Tee", "givenName": "Yee-Kai", "surname": "Tee", "__typename": "ArticleAuthorType" }, { "affiliation": "Lee Kong Chian Faculty of Engineering and Science, Universiti Tunku Abdul Rahman, Selangor, Malaysia", "fullName": 
"Bok-Min Goi", "givenName": "Bok-Min", "surname": "Goi", "__typename": "ArticleAuthorType" }, { "affiliation": "Faculty of Information and Communication Technology, Universiti Tunku Abdul Rahman, Perak, Malaysia", "fullName": "Wai-Kong Lee", "givenName": "Wai-Kong", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "icoin", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-01-01T00:00:00", "pubType": "proceedings", "pages": "7-12", "year": "2018", "issn": null, "isbn": "978-1-5386-2290-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08343073", "articleId": "12OmNBTJIOE", "__typename": "AdjacentArticleType" }, "next": { "fno": "08343075", "articleId": "12OmNApLGKz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdmw/2016/5910/0/07836773", "title": "Lexicon Knowledge Extraction with Sentiment Polarity Computation", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2016/07836773/12OmNAXxX4c", "parentPublication": { "id": "proceedings/icdmw/2016/5910/0", "title": "2016 IEEE 16th International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2018/4210/0/421001a685", "title": "Multimodal Object Classification Using Bidirectional Gated Recurrent Unit Networks", "doi": null, "abstractUrl": "/proceedings-article/dsc/2018/421001a685/12OmNBTawgq", "parentPublication": { "id": "proceedings/dsc/2018/4210/0", "title": "2018 IEEE Third International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmla/2016/6167/0/07838269", "title": "Faster Gated Recurrent Units via Conditional Computation", "doi": null, "abstractUrl": 
"/proceedings-article/icmla/2016/07838269/12OmNx9WT0d", "parentPublication": { "id": "proceedings/icmla/2016/6167/0", "title": "2016 15th IEEE International Conference on Machine Learning and Applications (ICMLA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2018/7447/0/744701a133", "title": "Sentiment Classification with Gated CNN and Spatial Pyramid Pooling", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2018/744701a133/19m3zCbRnWg", "parentPublication": { "id": "proceedings/iiai-aai/2018/7447/0", "title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispds/2021/1738/0/173800a197", "title": "Sentiment Classification of Chinese Railway Review Text Based on Multi-Feature Fusion Gated Recurrent Unit", "doi": null, "abstractUrl": "/proceedings-article/ispds/2021/173800a197/1A3i0aEAlHy", "parentPublication": { "id": "proceedings/ispds/2021/1738/0", "title": "2021 International Conference on Information Science, Parallel and Distributed Systems (ISPDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigdataservice/2019/0059/0/005900a173", "title": "Sentiment Analysis of Chinese Product Reviews using Gated Recurrent Unit", "doi": null, "abstractUrl": "/proceedings-article/bigdataservice/2019/005900a173/1dDLYm5B1S0", "parentPublication": { "id": "proceedings/bigdataservice/2019/0059/0", "title": "2019 IEEE Fifth International Conference on Big Data Computing Service and Applications (BigDataService)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2019/3014/0/301400b366", "title": "Table Structure Extraction with Bi-Directional Gated Recurrent Unit Networks", "doi": null, "abstractUrl": "/proceedings-article/icdar/2019/301400b366/1h81xvfiOB2", 
"parentPublication": { "id": "proceedings/icdar/2019/3014/0", "title": "2019 International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09005975", "title": "Radically Simplifying Gated Recurrent Architectures Without Loss of Performance", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09005975/1hJrVT4w67m", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2020/7624/0/762400a615", "title": "Stock Market Behaviour Prediction using Long Short-Term Memory Network and Gated Recurrent Unit", "doi": null, "abstractUrl": "/proceedings-article/csci/2020/762400a615/1uGYRf6Wcg0", "parentPublication": { "id": "proceedings/csci/2020/7624/0", "title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdacs/2021/2561/0/256100a031", "title": "A Stock Price Forecasting Method Using Autoregressive Integrated Moving Average model and Gated Recurrent Unit Network", "doi": null, "abstractUrl": "/proceedings-article/bdacs/2021/256100a031/1wiRr6qLetq", "parentPublication": { "id": "proceedings/bdacs/2021/2561/0", "title": "2021 International Conference on Big Data Analysis and Computer Science (BDACS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45VObpPx", "doi": "10.1109/CVPR.2018.00035", "title": "GVCNN: Group-View Convolutional Neural Networks for 3D Shape Recognition", "normalizedTitle": "GVCNN: Group-View Convolutional Neural Networks for 3D Shape Recognition", "abstract": "3D shape recognition has attracted much attention recently. Its recent advances advocate the usage of deep features and achieve the state-of-the-art performance. However, existing deep features for 3D shape recognition are restricted to a view-to-shape setting, which learns the shape descriptor from the view-level feature directly. Despite the exciting progress on view-based 3D shape description, the intrinsic hierarchical correlation and discriminability among views have not been well exploited, which is important for 3D shape representation. To tackle this issue, in this paper, we propose a group-view convolutional neural network (GVCNN) framework for hierarchical correlation modeling towards discriminative 3D shape description. The proposed GVCNN framework is composed of a hierarchical view-group-shape architecture, i.e., from the view level, the group level and the shape level, which are organized using a grouping strategy. Concretely, we first use an expanded CNN to extract a view level descriptor. Then, a grouping module is introduced to estimate the content discrimination of each view, based on which all views can be splitted into different groups according to their discriminative level. A group level description can be further generated by pooling from view descriptors. Finally, all group level descriptors are combined into the shape level descriptor according to their discriminative weights. 
Experimental results and comparison with state-of-the-art methods show that our proposed GVCNN method can achieve a significant performance gain on both the 3D shape classification and retrieval tasks.", "abstracts": [ { "abstractType": "Regular", "content": "3D shape recognition has attracted much attention recently. Its recent advances advocate the usage of deep features and achieve the state-of-the-art performance. However, existing deep features for 3D shape recognition are restricted to a view-to-shape setting, which learns the shape descriptor from the view-level feature directly. Despite the exciting progress on view-based 3D shape description, the intrinsic hierarchical correlation and discriminability among views have not been well exploited, which is important for 3D shape representation. To tackle this issue, in this paper, we propose a group-view convolutional neural network (GVCNN) framework for hierarchical correlation modeling towards discriminative 3D shape description. The proposed GVCNN framework is composed of a hierarchical view-group-shape architecture, i.e., from the view level, the group level and the shape level, which are organized using a grouping strategy. Concretely, we first use an expanded CNN to extract a view level descriptor. Then, a grouping module is introduced to estimate the content discrimination of each view, based on which all views can be splitted into different groups according to their discriminative level. A group level description can be further generated by pooling from view descriptors. Finally, all group level descriptors are combined into the shape level descriptor according to their discriminative weights. 
Experimental results and comparison with state-of-the-art methods show that our proposed GVCNN method can achieve a significant performance gain on both the 3D shape classification and retrieval tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "3D shape recognition has attracted much attention recently. Its recent advances advocate the usage of deep features and achieve the state-of-the-art performance. However, existing deep features for 3D shape recognition are restricted to a view-to-shape setting, which learns the shape descriptor from the view-level feature directly. Despite the exciting progress on view-based 3D shape description, the intrinsic hierarchical correlation and discriminability among views have not been well exploited, which is important for 3D shape representation. To tackle this issue, in this paper, we propose a group-view convolutional neural network (GVCNN) framework for hierarchical correlation modeling towards discriminative 3D shape description. The proposed GVCNN framework is composed of a hierarchical view-group-shape architecture, i.e., from the view level, the group level and the shape level, which are organized using a grouping strategy. Concretely, we first use an expanded CNN to extract a view level descriptor. Then, a grouping module is introduced to estimate the content discrimination of each view, based on which all views can be splitted into different groups according to their discriminative level. A group level description can be further generated by pooling from view descriptors. Finally, all group level descriptors are combined into the shape level descriptor according to their discriminative weights. 
Experimental results and comparison with state-of-the-art methods show that our proposed GVCNN method can achieve a significant performance gain on both the 3D shape classification and retrieval tasks.", "fno": "642000a264", "keywords": [ "Convolution", "Feature Extraction", "Feedforward Neural Nets", "Image Classification", "Image Matching", "Image Representation", "Image Retrieval", "Learning Artificial Intelligence", "Shape Recognition", "3 D Shape Recognition", "Deep Features", "View To Shape Setting", "Shape Descriptor", "View Level Feature", "View Based 3 D Shape Description", "3 D Shape Representation", "Group View Convolutional Neural Network Framework", "Discriminative 3 D Shape Description", "Hierarchical View Group Shape Architecture", "View Level Descriptor", "View Descriptors", "Group Level Descriptors", "Shape Level Descriptor", "3 D Shape Classification", "Group View Convolutional Neural Networks", "GVCNN Framework", "Grouping Strategy", "Grouping Module", "3 D Shape Classification Task", "Retrieval Task", "Shape", "Three Dimensional Displays", "Cameras", "Solid Modeling", "Convolutional Neural Networks", "Arrays" ], "authors": [ { "affiliation": null, "fullName": "Yifan Feng", "givenName": "Yifan", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zizhao Zhang", "givenName": "Zizhao", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xibin Zhao", "givenName": "Xibin", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Rongrong Ji", "givenName": "Rongrong", "surname": "Ji", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yue Gao", "givenName": "Yue", "surname": "Gao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "264-272", "year": 
"2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000a254", "articleId": "17D45VsBTYT", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000a273", "articleId": "17D45WKWnI9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2017/2610/0/261001a067", "title": "3D Shape Reconstruction from Sketches via Multi-view Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a067/12OmNCu4nbZ", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a945", "title": "Multi-view Convolutional Neural Networks for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a945/12OmNyfdOPF", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/12/08444765", "title": "VERAM: View-Enhanced Recurrent Attention Model for 3D Shape Classification", "doi": null, "abstractUrl": "/journal/tg/2019/12/08444765/13rRUxYIN4i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a258", "title": "Cross-Domain Image-Based 3D Shape Retrieval by View Sequence Learning", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a258/17D45W9KVJ2", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09947327", "title": "Learning View-Based Graph Convolutional Network for Multi-View 3D Shape Analysis", "doi": null, "abstractUrl": "/journal/tp/2023/06/09947327/1IiLnMjU1KE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b648", "title": "SVNet: A Single View Network for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b648/1cdOLCtFJEk", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b042", "title": "Pixel2Mesh++: Multi-View 3D Mesh Generation via Deformation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b042/1hQqmZxGLQc", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b847", "title": "View-GCN: View-Based Graph Convolutional Network for 3D Shape Analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b847/1m3nH0ZAWxG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09462521", "title": "View-Aware Geometry-Structure Joint Learning for Single-View 3D Shape Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2022/10/09462521/1uDSvbmzJQc", "parentPublication": { "id": "trans/tp", 
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2020/8666/0/866600a223", "title": "Sketch-based 3D Shape Retrieval with Multi-Silhouette View Based on Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icicta/2020/866600a223/1wRIvGNgH9m", "parentPublication": { "id": "proceedings/icicta/2020/8666/0", "title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1E2vX1vyvqU", "title": "2021 2nd International Conference on Computer Science and Management Technology (ICCSMT)", "acronym": "iccsmt", "groupId": "1840604", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1E2w2K9DWgw", "doi": "10.1109/ICCSMT54525.2021.00065", "title": "PREMA: Part-based REcurrent Multi-view Aggregation Network for 3D Shape Retrieval", "normalizedTitle": "PREMA: Part-based REcurrent Multi-view Aggregation Network for 3D Shape Retrieval", "abstract": "We propose the Part-based Recurrent Multi-view Aggregation network(PREMA) to eliminate the detrimental effects of the practical view defects, such as insufficient view numbers, occlusions or background clutters, and also enhance the discriminative ability of shape representations. Inspired by the fact that human recognize an object mainly by its discriminant parts, we define the multi-view coherent part(MCP), a discriminant part reoccurring in different views. Our PREMA can reliably locate and effectively utilize MCPs to build robust shape representations. Comprehensively, we design a novel Regional Attention Unit(RAU) in PREMA to compute the confidence map for each view, and extract MCPs by applying those maps to view features. PREMA accentuates MCPs via correlating features of different views, and aggregates the part-aware features for shape representation. Finally, we show extensive evaluations to demonstrate that our method achieves the state-of-the-art accuracy for 3D shape retrieval on ModelNet-40 and ShapeNetCore-55 datasets.", "abstracts": [ { "abstractType": "Regular", "content": "We propose the Part-based Recurrent Multi-view Aggregation network(PREMA) to eliminate the detrimental effects of the practical view defects, such as insufficient view numbers, occlusions or background clutters, and also enhance the discriminative ability of shape representations. 
Inspired by the fact that human recognize an object mainly by its discriminant parts, we define the multi-view coherent part(MCP), a discriminant part reoccurring in different views. Our PREMA can reliably locate and effectively utilize MCPs to build robust shape representations. Comprehensively, we design a novel Regional Attention Unit(RAU) in PREMA to compute the confidence map for each view, and extract MCPs by applying those maps to view features. PREMA accentuates MCPs via correlating features of different views, and aggregates the part-aware features for shape representation. Finally, we show extensive evaluations to demonstrate that our method achieves the state-of-the-art accuracy for 3D shape retrieval on ModelNet-40 and ShapeNetCore-55 datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose the Part-based Recurrent Multi-view Aggregation network(PREMA) to eliminate the detrimental effects of the practical view defects, such as insufficient view numbers, occlusions or background clutters, and also enhance the discriminative ability of shape representations. Inspired by the fact that human recognize an object mainly by its discriminant parts, we define the multi-view coherent part(MCP), a discriminant part reoccurring in different views. Our PREMA can reliably locate and effectively utilize MCPs to build robust shape representations. Comprehensively, we design a novel Regional Attention Unit(RAU) in PREMA to compute the confidence map for each view, and extract MCPs by applying those maps to view features. PREMA accentuates MCPs via correlating features of different views, and aggregates the part-aware features for shape representation. 
Finally, we show extensive evaluations to demonstrate that our method achieves the state-of-the-art accuracy for 3D shape retrieval on ModelNet-40 and ShapeNetCore-55 datasets.", "fno": "206300a311", "keywords": [ "Feature Extraction", "Image Classification", "Image Representation", "Image Retrieval", "Object Detection", "Object Recognition", "Part Based R Ecurrent Multiview Aggregation Network", "3 D Shape Retrieval", "Part Based Recurrent Multiview Aggregation Network", "Practical View Defects", "Insufficient View Numbers", "Shape Representation", "Discriminant Part", "Robust Shape Representations", "View Features", "PREMA Accentuates MC Ps", "Part Aware Features", "Computer Science", "Solid Modeling", "Three Dimensional Displays", "Shape", "Aggregates", "Feature Extraction", "Robustness", "3 D Retrieval", "Attention Unit", "Multi View Aggregation", "Representation Learning" ], "authors": [ { "affiliation": "School of Computer Science, Beihang University,Beijing,China", "fullName": "Jiongchao Jin", "givenName": "Jiongchao", "surname": "Jin", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science, Beihang University,Beijing,China", "fullName": "Huanqiang Xu", "givenName": "Huanqiang", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science, Beihang University,Beijing,China", "fullName": "Zehao Tang", "givenName": "Zehao", "surname": "Tang", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science, Beihang University,Beijing,China", "fullName": "Pengliang Ji", "givenName": "Pengliang", "surname": "Ji", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science, Beihang University,Beijing,China", "fullName": "Zhang Xiong", "givenName": "Zhang", "surname": "Xiong", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccsmt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-11-01T00:00:00", 
"pubType": "proceedings", "pages": "311-318", "year": "2021", "issn": null, "isbn": "978-1-6654-2063-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "206300a305", "articleId": "1E2waPL7zj2", "__typename": "AdjacentArticleType" }, "next": { "fno": "206300a319", "articleId": "1E2wbv80YE0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209a544", "title": "View-Invariant Gesture Recognition Using Nonparametric Shape Descriptor", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a544/12OmNzgeLIp", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08439008", "title": "Learning Discriminative 3D Shape Representations by View Discerning Networks", "doi": null, "abstractUrl": "/journal/tg/2019/10/08439008/13rRUIJuxpE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/12/08444765", "title": "VERAM: View-Enhanced Recurrent Attention Model for 3D Shape Classification", "doi": null, "abstractUrl": "/journal/tg/2019/12/08444765/13rRUxYIN4i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a264", "title": "GVCNN: Group-View Convolutional Neural Networks for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a264/17D45VObpPx", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on 
Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/02/08565889", "title": "Improved Panoramic Representation via Bidirectional Recurrent View Aggregation for Three-Dimensional Model Retrieval", "doi": null, "abstractUrl": "/magazine/cg/2019/02/08565889/17D45Wuc3am", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09947327", "title": "Learning View-Based Graph Convolutional Network for Multi-View 3D Shape Analysis", "doi": null, "abstractUrl": "/journal/tp/2023/06/09947327/1IiLnMjU1KE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b648", "title": "SVNet: A Single View Network for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b648/1cdOLCtFJEk", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b847", "title": "View-GCN: View-Based Graph Convolutional Network for 3D Shape Analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b847/1m3nH0ZAWxG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800e503", "title": "Few-Shot Learning of Part-Specific Probability Space for 3D Shape Segmentation", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2020/716800e503/1m3oq2PAndK", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09462521", "title": "View-Aware Geometry-Structure Joint Learning for Single-View 3D Shape Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2022/10/09462521/1uDSvbmzJQc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cdOEoawzMQ", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cdOLCtFJEk", "doi": "10.1109/ICME.2019.00284", "title": "SVNet: A Single View Network for 3D Shape Recognition", "normalizedTitle": "SVNet: A Single View Network for 3D Shape Recognition", "abstract": "As the great success of deep learning in the 2D image recognition, applying it to 3D shape recognition has recently received much more attention. In this paper, we propose a Convolutional Neural Network (CNN) framework named Single-view Network (SVNet) for 3D recognition. Unlike multiple-view-based methods that aggregate multiple views into one, SVNet extracts and retains the feature of each view separately. Concretely, there are M fully connected layers in the last layer of SVNet, where M denotes the number of views. SVNet integrates the prediction of each fully connected layer to get the final result, which is similar to voting. In addition, our method does data augment by aligning the 3D shape and using normalized normals as the color of 3D shapes. Compared to the state-of-the-art methods, SVNet achieves better performance in 3D shape classification and retrieval on the benchmark dataset. The implementation of SVNet is available at https://github.com/paopaoer/experiment.git.", "abstracts": [ { "abstractType": "Regular", "content": "As the great success of deep learning in the 2D image recognition, applying it to 3D shape recognition has recently received much more attention. In this paper, we propose a Convolutional Neural Network (CNN) framework named Single-view Network (SVNet) for 3D recognition. Unlike multiple-view-based methods that aggregate multiple views into one, SVNet extracts and retains the feature of each view separately. 
Concretely, there are M fully connected layers in the last layer of SVNet, where M denotes the number of views. SVNet integrates the prediction of each fully connected layer to get the final result, which is similar to voting. In addition, our method does data augment by aligning the 3D shape and using normalized normals as the color of 3D shapes. Compared to the state-of-the-art methods, SVNet achieves better performance in 3D shape classification and retrieval on the benchmark dataset. The implementation of SVNet is available at https://github.com/paopaoer/experiment.git.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As the great success of deep learning in the 2D image recognition, applying it to 3D shape recognition has recently received much more attention. In this paper, we propose a Convolutional Neural Network (CNN) framework named Single-view Network (SVNet) for 3D recognition. Unlike multiple-view-based methods that aggregate multiple views into one, SVNet extracts and retains the feature of each view separately. Concretely, there are M fully connected layers in the last layer of SVNet, where M denotes the number of views. SVNet integrates the prediction of each fully connected layer to get the final result, which is similar to voting. In addition, our method does data augment by aligning the 3D shape and using normalized normals as the color of 3D shapes. Compared to the state-of-the-art methods, SVNet achieves better performance in 3D shape classification and retrieval on the benchmark dataset. 
The implementation of SVNet is available at https://github.com/paopaoer/experiment.git.", "fno": "955200b648", "keywords": [ "Convolutional Neural Nets", "Feature Extraction", "Image Classification", "Image Retrieval", "Learning Artificial Intelligence", "Shape Recognition", "3 D Shape Recognition", "Multiple View Based Methods", "M Fully Connected Layers", "Deep Learning", "2 D Image Recognition", "SV Net Extraction", "Single View Network Extraction", "Convolutional Neural Network Framework", "CNN Framework", "Feature Extraction", "3 D Shape Classification", "Shape Retrieval", "Three Dimensional Displays", "Shape", "Feature Extraction", "Two Dimensional Displays", "Cameras", "Convolutional Neural Networks", "Aggregates", "SV Net 3 D Shape Recognition CNN Multiple Views" ], "authors": [ { "affiliation": "Shanghai University, China", "fullName": "Shaoshuai Li", "givenName": "Shaoshuai", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai University, China", "fullName": "Fuyan Liu", "givenName": "Fuyan", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-07-01T00:00:00", "pubType": "proceedings", "pages": "1648-1653", "year": "2019", "issn": null, "isbn": "978-1-5386-9552-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "955200b642", "articleId": "1cdOR0xyDFS", "__typename": "AdjacentArticleType" }, "next": { "fno": "955200b654", "articleId": "1cdOGkv1Juw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391a945", "title": "Multi-view Convolutional Neural Networks for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a945/12OmNyfdOPF", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", 
"title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/12/08444765", "title": "VERAM: View-Enhanced Recurrent Attention Model for 3D Shape Classification", "doi": null, "abstractUrl": "/journal/tg/2019/12/08444765/13rRUxYIN4i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a264", "title": "GVCNN: Group-View Convolutional Neural Networks for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a264/17D45VObpPx", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a186", "title": "Multi-view Harmonized Bilinear Network for 3D Object Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a186/17D45VsBTWZ", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a001", "title": "MVTN: Multi-View Transformation Network for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200a001/1BmEDFUFqw0", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a397", "title": "Learning Canonical View Representation for 3D Shape Recognition with Arbitrary 
Views", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200a397/1BmKVyG6sO4", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsmt/2021/2063/0/206300a311", "title": "PREMA: Part-based REcurrent Multi-view Aggregation Network for 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/iccsmt/2021/206300a311/1E2w2K9DWgw", "parentPublication": { "id": "proceedings/iccsmt/2021/2063/0", "title": "2021 2nd International Conference on Computer Science and Management Technology (ICCSMT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09947327", "title": "Learning View-Based Graph Convolutional Network for Multi-View 3D Shape Analysis", "doi": null, "abstractUrl": "/journal/tp/2023/06/09947327/1IiLnMjU1KE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h504", "title": "Learning Relationships for Multi-View 3D Object Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h504/1hVldkkuBAA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b847", "title": "View-GCN: View-Based Graph Convolutional Network for 3D Shape Analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b847/1m3nH0ZAWxG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyr6w5YIIU", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gyrbF7GVAA", "doi": "10.1109/CVPR.2019.00570", "title": "Recurrent Neural Network for (Un-)Supervised Learning of Monocular Video Visual Odometry and Depth", "normalizedTitle": "Recurrent Neural Network for (Un-)Supervised Learning of Monocular Video Visual Odometry and Depth", "abstract": "Deep learning-based, single-view depth estimation methods have recently shown highly promising results. However, such methods ignore one of the most important features for determining depth in the human vision system, which is motion. We propose a learning-based, multi-view dense depth map and odometry estimation method that uses Recurrent Neural Networks (RNN) and trains utilizing multi-view image reprojection and forward-backward flow-consistency losses. Our model can be trained in a supervised or even unsupervised mode. It is designed for depth and visual odometry estimation from video where the input frames are temporally correlated. However, it also generalizes to single-view depth estimation. Our method produces superior results to the state-of-the-art approaches for single-view and multi-view learning-based depth estimation on the KITTI driving dataset.", "abstracts": [ { "abstractType": "Regular", "content": "Deep learning-based, single-view depth estimation methods have recently shown highly promising results. However, such methods ignore one of the most important features for determining depth in the human vision system, which is motion. We propose a learning-based, multi-view dense depth map and odometry estimation method that uses Recurrent Neural Networks (RNN) and trains utilizing multi-view image reprojection and forward-backward flow-consistency losses. 
Our model can be trained in a supervised or even unsupervised mode. It is designed for depth and visual odometry estimation from video where the input frames are temporally correlated. However, it also generalizes to single-view depth estimation. Our method produces superior results to the state-of-the-art approaches for single-view and multi-view learning-based depth estimation on the KITTI driving dataset.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Deep learning-based, single-view depth estimation methods have recently shown highly promising results. However, such methods ignore one of the most important features for determining depth in the human vision system, which is motion. We propose a learning-based, multi-view dense depth map and odometry estimation method that uses Recurrent Neural Networks (RNN) and trains utilizing multi-view image reprojection and forward-backward flow-consistency losses. Our model can be trained in a supervised or even unsupervised mode. It is designed for depth and visual odometry estimation from video where the input frames are temporally correlated. However, it also generalizes to single-view depth estimation. 
Our method produces superior results to the state-of-the-art approaches for single-view and multi-view learning-based depth estimation on the KITTI driving dataset.", "fno": "329300f550", "keywords": [ "Recurrent Neural Nets", "Stereo Image Processing", "Unsupervised Learning", "Video Signal Processing", "Recurrent Neural Network", "Unsupervised Learning", "Monocular Video Visual Odometry", "Single View Depth Estimation Methods", "Multiview Image Reprojection", "Visual Odometry Estimation", "Multiview Learning Based Depth Estimation", "Deep Learning Based Depth Estimation Methods", "Forward Backward Flow Consistency Losses", "Recurrent Neural Networks", "Machine Vision", "Video Sequences", "Pose Estimation", "Network Architecture", "Cameras", "Pattern Recognition", "3 D From Multiview And Sensors", "3 D From Single Image", "Deep Learning" ], "authors": [ { "affiliation": "Univ. of North Carolina at Chapel Hill", "fullName": "Rui Wang", "givenName": "Rui", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of North Carolina at Chapel Hill", "fullName": "Stephen M. 
Pizer", "givenName": "Stephen M.", "surname": "Pizer", "__typename": "ArticleAuthorType" }, { "affiliation": "UNC-Chapel Hill", "fullName": "Jan-Michael Frahm", "givenName": "Jan-Michael", "surname": "Frahm", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-06-01T00:00:00", "pubType": "proceedings", "pages": "5550-5559", "year": "2019", "issn": null, "isbn": "978-1-7281-3293-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "329300f540", "articleId": "1gyrTS2XiJa", "__typename": "AdjacentArticleType" }, "next": { "fno": "329300f560", "articleId": "1gyrSNHVDYk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sibgrapi/2017/2219/0/2219a001", "title": "Monocular Visual Odometry with Cyclic Estimation", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2017/2219a001/12OmNzZEABL", "parentPublication": { "id": "proceedings/sibgrapi/2017/2219/0", "title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a340", "title": "Unsupervised Learning of Monocular Depth Estimation and Visual Odometry with Deep Feature Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a340/17D45WB0qcN", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000a396", "title": "DepthNet: A Recurrent Neural Network Architecture for Monocular Depth Prediction", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2018/610000a396/17D45WXIkI7", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956576", "title": "Joint Self-Supervised Monocular Depth Estimation and SLAM", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a424", "title": "Enhancing Self-Supervised Monocular Depth Estimation with Traditional Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a424/1ezRBd1Ke4g", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2020/9891/0/09108693", "title": "Depth Prediction for Monocular Direct Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/crv/2020/09108693/1kpIGiAFaYo", "parentPublication": { "id": "proceedings/crv/2020/9891/0", "title": "2020 17th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b278", "title": "D3VO: Deep Depth, Deep Pose and Deep Uncertainty for Monocular Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b278/1m3neRj6c1O", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a464", "title": "Visual Odometry integrated with Self-Supervised Monocular Depth Estimation", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a464/1xqyLky3tSM", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlise/2021/1736/0/173600a425", "title": "Spatial and Temporal Monocular Visual Odometry", "doi": null, "abstractUrl": "/proceedings-article/mlise/2021/173600a425/1yOW4ibH7by", "parentPublication": { "id": "proceedings/mlise/2021/1736/0", "title": "2021 International Conference on Machine Learning and Intelligent Systems Engineering (MLISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900n3179", "title": "Generalizing to the Open World: Deep Visual Odometry with Online Adaptation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900n3179/1yeJYMhbwlO", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1gyr6w5YIIU", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1gys8eWr1Ru", "doi": "10.1109/CVPR.2019.00270", "title": "Learning Spatial Common Sense With Geometry-Aware Recurrent Networks", "normalizedTitle": "Learning Spatial Common Sense With Geometry-Aware Recurrent Networks", "abstract": "We integrate two powerful ideas, geometry and deep visual representation learning, into recurrent network architectures for mobile visual scene understanding. The proposed networks learn to “lift” 2D visual features and integrate them over time into latent 3D feature maps of the scene. They are equipped with differentiable geometric operations, such as projection, unprojection, egomotion estimation and stabilization, in order to compute a geometrically-consistent mapping between the world scene and their 3D latent feature space. We train the proposed architectures to predict novel image views given short frame sequences as input. Their predictions strongly generalize to scenes with a novel number of objects, appearances and configurations, and greatly outperform predictions of previous works that do not consider egomotion stabilization or a space-aware latent feature space. We train the proposed architectures to detect and segment objects in 3D, using the latent 3D feature map as input—as opposed to 2D feature maps computed from video frames. The resulting detections are permanent: they continue to exist even when an object gets occluded or leaves the field of view. 
Our experiments suggest the proposed space-aware latent feature arrangement and egomotion-stabilized convolutions are essential architectural choices for spatial common sense to emerge in artificial embodied visual agents.", "abstracts": [ { "abstractType": "Regular", "content": "We integrate two powerful ideas, geometry and deep visual representation learning, into recurrent network architectures for mobile visual scene understanding. The proposed networks learn to “lift” 2D visual features and integrate them over time into latent 3D feature maps of the scene. They are equipped with differentiable geometric operations, such as projection, unprojection, egomotion estimation and stabilization, in order to compute a geometrically-consistent mapping between the world scene and their 3D latent feature space. We train the proposed architectures to predict novel image views given short frame sequences as input. Their predictions strongly generalize to scenes with a novel number of objects, appearances and configurations, and greatly outperform predictions of previous works that do not consider egomotion stabilization or a space-aware latent feature space. We train the proposed architectures to detect and segment objects in 3D, using the latent 3D feature map as input—as opposed to 2D feature maps computed from video frames. The resulting detections are permanent: they continue to exist even when an object gets occluded or leaves the field of view. Our experiments suggest the proposed space-aware latent feature arrangement and egomotion-stabilized convolutions are essential architectural choices for spatial common sense to emerge in artificial embodied visual agents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We integrate two powerful ideas, geometry and deep visual representation learning, into recurrent network architectures for mobile visual scene understanding. 
The proposed networks learn to “lift” 2D visual features and integrate them over time into latent 3D feature maps of the scene. They are equipped with differentiable geometric operations, such as projection, unprojection, egomotion estimation and stabilization, in order to compute a geometrically-consistent mapping between the world scene and their 3D latent feature space. We train the proposed architectures to predict novel image views given short frame sequences as input. Their predictions strongly generalize to scenes with a novel number of objects, appearances and configurations, and greatly outperform predictions of previous works that do not consider egomotion stabilization or a space-aware latent feature space. We train the proposed architectures to detect and segment objects in 3D, using the latent 3D feature map as input—as opposed to 2D feature maps computed from video frames. The resulting detections are permanent: they continue to exist even when an object gets occluded or leaves the field of view. 
Our experiments suggest the proposed space-aware latent feature arrangement and egomotion-stabilized convolutions are essential architectural choices for spatial common sense to emerge in artificial embodied visual agents.", "fno": "329300c590", "keywords": [ "Image Representation", "Image Sequences", "Learning Artificial Intelligence", "Motion Estimation", "Object Detection", "Recurrent Neural Nets", "Video Signal Processing", "Differentiable Geometric Operations", "Egomotion Estimation", "Geometrically Consistent Mapping", "3 D Latent Feature Space", "Egomotion Stabilization", "Space Aware Latent Feature Space", "Latent 3 D Feature Map", "2 D Feature Maps", "Space Aware Latent Feature Arrangement", "Egomotion Stabilized Convolutions", "Spatial Common Sense", "Visual Agents", "Geometry Aware Recurrent Networks", "Deep Visual Representation Learning", "Recurrent Network Architectures", "Mobile Visual Scene Understanding", "2 D Visual Features", "Representation Learning", "Visualization", "Solid Modeling", "Three Dimensional Displays", "Tensors", "Recurrent Neural Networks", "Computer Architecture", "Representation Learning", "3 D From Single Image", "Recognition Detection", "Categorization", "Retrieval", "Scene Analysis And Understa" ], "authors": [ { "affiliation": "Carnegie Mellon Univ.", "fullName": "Hsiao-Yu Fish Tung", "givenName": "Hsiao-Yu Fish", "surname": "Tung", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon Univ.", "fullName": "Ricson Cheng", "givenName": "Ricson", "surname": "Cheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon Univ.", "fullName": "Katerina Fragkiadaki", "givenName": "Katerina", "surname": "Fragkiadaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-06-01T00:00:00", "pubType": "proceedings", "pages": "2590-2598", "year": "2019", "issn": null, "isbn": 
"978-1-7281-3293-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "329300c581", "articleId": "1gys12r7Pfq", "__typename": "AdjacentArticleType" }, "next": { "fno": "329300c599", "articleId": "1gyrwFOAOfS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2014/4308/0/4308a579", "title": "Visual Navigation Aid for the Blind in Dynamic Environments", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a579/12OmNvDI44K", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2015/6759/0/07301289", "title": "Semantically-enriched 3D models for common-sense knowledge", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301289/12OmNxFsmtQ", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a937", "title": "3D Object Detection with Latent Support Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a937/17D45Xtvp98", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669781", "title": "Structure-Based Protein-Drug Affinity Prediction with Spatial Attention Mechanisms", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669781/1A9VnIQzfIQ", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 
IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995656", "title": "RecurPocket: Recurrent Lmser Network with Gating Mechanism for Protein Binding Site Detection", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995656/1JC3w2WXgYM", "parentPublication": { "id": "proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412440", "title": "Learning Interpretable Representation for 3D Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412440/1tmhN9Q0eg8", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09462521", "title": "View-Aware Geometry-Structure Joint Learning for Single-View 3D Shape Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2022/10/09462521/1uDSvbmzJQc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09524465", "title": "Geometry-Guided Dense Perspective Network for Speech-Driven Facial Animation", "doi": null, "abstractUrl": "/journal/tg/2022/12/09524465/1wpqCsqBU6Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100c110", "title": "3D Shapes Local Geometry Codes Learning with SDF", "doi": null, "abstractUrl": 
"/proceedings-article/iccvw/2021/019100c110/1yNi23FEXja", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900m2482", "title": "CoCoNets: Continuous Contrastive 3D Scene Representations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900m2482/1yeKStk2Rxu", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3nH0ZAWxG", "doi": "10.1109/CVPR42600.2020.00192", "title": "View-GCN: View-Based Graph Convolutional Network for 3D Shape Analysis", "normalizedTitle": "View-GCN: View-Based Graph Convolutional Network for 3D Shape Analysis", "abstract": "View-based approach that recognizes 3D shape through its projected 2D images has achieved state-of-the-art results for 3D shape recognition. The major challenge for view-based approach is how to aggregate multi-view features to be a global shape descriptor. In this work, we propose a novel view-based Graph Convolutional Neural Network, dubbed as view-GCN, to recognize 3D shape based on graph representation of multiple views in flexible view configurations. We first construct view-graph with multiple views as graph nodes, then design a graph convolutional neural network over view-graph to hierarchically learn discriminative shape descriptor considering relations of multiple views. The view-GCN is a hierarchical network based on local and non-local graph convolution for feature transform, and selective view-sampling for graph coarsening. Extensive experiments on benchmark datasets show that view-GCN achieves state-of-the-art results for 3D shape classification and retrieval.", "abstracts": [ { "abstractType": "Regular", "content": "View-based approach that recognizes 3D shape through its projected 2D images has achieved state-of-the-art results for 3D shape recognition. The major challenge for view-based approach is how to aggregate multi-view features to be a global shape descriptor. 
In this work, we propose a novel view-based Graph Convolutional Neural Network, dubbed as view-GCN, to recognize 3D shape based on graph representation of multiple views in flexible view configurations. We first construct view-graph with multiple views as graph nodes, then design a graph convolutional neural network over view-graph to hierarchically learn discriminative shape descriptor considering relations of multiple views. The view-GCN is a hierarchical network based on local and non-local graph convolution for feature transform, and selective view-sampling for graph coarsening. Extensive experiments on benchmark datasets show that view-GCN achieves state-of-the-art results for 3D shape classification and retrieval.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "View-based approach that recognizes 3D shape through its projected 2D images has achieved state-of-the-art results for 3D shape recognition. The major challenge for view-based approach is how to aggregate multi-view features to be a global shape descriptor. In this work, we propose a novel view-based Graph Convolutional Neural Network, dubbed as view-GCN, to recognize 3D shape based on graph representation of multiple views in flexible view configurations. We first construct view-graph with multiple views as graph nodes, then design a graph convolutional neural network over view-graph to hierarchically learn discriminative shape descriptor considering relations of multiple views. The view-GCN is a hierarchical network based on local and non-local graph convolution for feature transform, and selective view-sampling for graph coarsening. 
Extensive experiments on benchmark datasets show that view-GCN achieves state-of-the-art results for 3D shape classification and retrieval.", "fno": "716800b847", "keywords": [ "Feature Extraction", "Graph Theory", "Image Classification", "Image Representation", "Learning Artificial Intelligence", "Neural Nets", "Object Recognition", "Shape Recognition", "Graph Coarsening", "View GCN Achieves State Of The Art Results", "3 D Shape Analysis", "View Based Approach", "3 D Shape Recognition", "Multiview Features", "Global Shape Descriptor", "Graph Representation", "Flexible View Configurations", "View Graph", "Graph Nodes", "Discriminative Shape", "Nonlocal Graph Convolution", "Selective View Sampling", "View Based Graph Convolutional Neural Network", "View Based Graph Convolutional Network", "Shape", "Three Dimensional Displays", "Convolution", "Feature Extraction", "Aggregates", "Two Dimensional Displays", "Image Recognition" ], "authors": [ { "affiliation": "Xi'an Jiaotong University, Xi'an, China", "fullName": "Xin Wei", "givenName": "Xin", "surname": "Wei", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi'an Jiaotong University, Xi'an, China", "fullName": "Ruixuan Yu", "givenName": "Ruixuan", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi'an Jiaotong University, Xi'an, China", "fullName": "Jian Sun", "givenName": "Jian", "surname": "Sun", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "1847-1856", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800b836", "articleId": "1m3nhp3wVOM", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800b857", "articleId": "1m3o3Bu9VPa", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "mags/cg/2019/02/08611124", "title": "Active 3-D Shape Cosegmentation With Graph Convolutional Networks", "doi": null, "abstractUrl": "/magazine/cg/2019/02/08611124/17D45WgziN7", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c598", "title": "FeaStNet: Feature-Steered Graph Convolutions for 3D Shape Analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c598/17D45XacGkc", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200c773", "title": "PR-GCN: A Deep Graph Convolutional Network with Point Refinement for 6D Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200c773/1BmF4ZaLuz6", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09947327", "title": "Learning View-Based Graph Convolutional Network for Multi-View 3D Shape Analysis", "doi": null, "abstractUrl": "/journal/tp/2023/06/09947327/1IiLnMjU1KE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b648", "title": "SVNet: A Single View Network for 3D Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b648/1cdOLCtFJEk", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE 
International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09151005", "title": "FGCN: Deep Feature-based Graph Convolutional Network for Semantic Segmentation of Urban 3D Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09151005/1lPH3VjHAmA", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d802", "title": "CPR-GCN: Conditional Partial-Residual Graph Convolutional Network in Automated Anatomical Labeling of Coronary Arteries", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d802/1m3o8WNJvO0", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b797", "title": "Convolution in the Cloud: Learning Deformable Kernels in 3D Graph Convolution Networks for Point Cloud Analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b797/1m3onRq4x3y", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2020/4272/0/427200a079", "title": "MER-GCN: Micro-Expression Recognition Based on Relation Modeling with Graph Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/mipr/2020/427200a079/1mAa2bupPgs", "parentPublication": { "id": "proceedings/mipr/2020/4272/0", "title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval 
(MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/08/09355025", "title": "Learning of 3D Graph Convolution Networks for Point Cloud Analysis", "doi": null, "abstractUrl": "/journal/tp/2022/08/09355025/1rgCbgC4Z8s", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyFCvPp", "title": "Tenth International Conference on Information Visualisation (IV'06)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNC0guAM", "doi": "10.1109/IV.2006.95", "title": "Scalable Pixel-based Visual Interfaces: Challenges and Solutions", "normalizedTitle": "Scalable Pixel-based Visual Interfaces: Challenges and Solutions", "abstract": "The information revolution is creating and publishing vast data sets, such as records of business transactions, environmental statistics and census demographics. In many application domains, this data is collected and indexed by geo-spatial location. The discovery of interesting patterns in such databases through visual analytics is a key to turn this data into valuable information. Challenges arise because newly available geo-spatial data sets often have millions of records, or even far more, they are from multiple and heterogeneous data sources, and the output devices have significantly changed, e.g. high-resolution pixilated displays are increasingly available in both wall-sized and desktop units . New techniques are needed to cope with this scale. In this paper we focus on ways to increase the scalability of pixel-based visual interfaces by adding task on hands scenarios that tightly integrate the data analyst into the exploration of geo-spatial data sets.", "abstracts": [ { "abstractType": "Regular", "content": "The information revolution is creating and publishing vast data sets, such as records of business transactions, environmental statistics and census demographics. In many application domains, this data is collected and indexed by geo-spatial location. The discovery of interesting patterns in such databases through visual analytics is a key to turn this data into valuable information. 
Challenges arise because newly available geo-spatial data sets often have millions of records, or even far more, they are from multiple and heterogeneous data sources, and the output devices have significantly changed, e.g. high-resolution pixilated displays are increasingly available in both wall-sized and desktop units . New techniques are needed to cope with this scale. In this paper we focus on ways to increase the scalability of pixel-based visual interfaces by adding task on hands scenarios that tightly integrate the data analyst into the exploration of geo-spatial data sets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The information revolution is creating and publishing vast data sets, such as records of business transactions, environmental statistics and census demographics. In many application domains, this data is collected and indexed by geo-spatial location. The discovery of interesting patterns in such databases through visual analytics is a key to turn this data into valuable information. Challenges arise because newly available geo-spatial data sets often have millions of records, or even far more, they are from multiple and heterogeneous data sources, and the output devices have significantly changed, e.g. high-resolution pixilated displays are increasingly available in both wall-sized and desktop units . New techniques are needed to cope with this scale. 
In this paper we focus on ways to increase the scalability of pixel-based visual interfaces by adding task on hands scenarios that tightly integrate the data analyst into the exploration of geo-spatial data sets.", "fno": "26020032", "keywords": [ "Visual Interfaces", "Scalability", "Visualization" ], "authors": [ { "affiliation": "University of Rostock", "fullName": "Mike Sips", "givenName": "Mike", "surname": "Sips", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rostock", "fullName": "Jorn Schneidewind", "givenName": "Jorn", "surname": "Schneidewind", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rostock", "fullName": "Daniel A. Keim", "givenName": "Daniel A.", "surname": "Keim", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rostock", "fullName": "Heidrun Schumann", "givenName": "Heidrun", "surname": "Schumann", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-07-01T00:00:00", "pubType": "proceedings", "pages": "32-38", "year": "2006", "issn": "1550-6037", "isbn": "0-7695-2602-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "26020025", "articleId": "12OmNzd7bl4", "__typename": "AdjacentArticleType" }, "next": { "fno": "26020039", "articleId": "12OmNqGA5hK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2004/2244/0/01410491", "title": "Web visualization of geo-spatial data using SVG and VRML/X3D", "doi": null, "abstractUrl": "/proceedings-article/icig/2004/01410491/12OmNB9t6nn", "parentPublication": { "id": "proceedings/icig/2004/2244/0", "title": "Proceedings. 
Third International Conference on Image and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mdm/2016/0883/1/0883a310", "title": "An Unsupervised Collaborative Approach to Identifying Home and Work Locations", "doi": null, "abstractUrl": "/proceedings-article/mdm/2016/0883a310/12OmNCmGNRw", "parentPublication": { "id": "proceedings/mdm/2016/0883/1", "title": "2016 17th IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2014/6227/0/07042509", "title": "MovementFinder: Visual analytics of origin-destination patterns from geo-tagged social media", "doi": null, "abstractUrl": "/proceedings-article/vast/2014/07042509/12OmNvmowMi", "parentPublication": { "id": "proceedings/vast/2014/6227/0", "title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2004/8779/0/87790033", "title": "RecMap: Rectangular Map Approximations", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2004/87790033/12OmNyKa6bL", "parentPublication": { "id": "proceedings/ieee-infovis/2004/8779/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2011/935/0/05742383", "title": "Exploring geo-temporal differences using GTdiff", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742383/12OmNzIUfN9", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2000/01/v0059", "title": "Designing Pixel-Oriented Visualization Techniques: Theory and Applications", "doi": null, 
"abstractUrl": "/journal/tg/2000/01/v0059/13rRUwhHcJa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v0749", "title": "Visualization of Geo-spatial Point Sets via Global Shape Transformation and Local Pixel Placement", "doi": null, "abstractUrl": "/journal/tg/2006/05/v0749/13rRUxBa5rM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2006/09/r9026", "title": "Developing Concept-Based User Interfaces for Scientific Computing", "doi": null, "abstractUrl": "/magazine/co/2006/09/r9026/13rRUyoPSSq", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/03/v0255", "title": "Hierarchical Pixel Bar Charts", "doi": null, "abstractUrl": "/journal/tg/2002/03/v0255/13rRUyuegh1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2022/2335/0/233500a151", "title": "SET-STAT-MAP: Extending Parallel Sets for Visualizing Mixed Data", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2022/233500a151/1E2wfuqTfMY", "parentPublication": { "id": "proceedings/pacificvis/2022/2335/0", "title": "2022 IEEE 15th Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvjgWMZ", "title": "2008 12th International Conference Information Visualisation", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNqzu6IZ", "doi": "10.1109/IV.2008.50", "title": "Visualization Enhanced Semantic Wikis for Patent Information", "normalizedTitle": "Visualization Enhanced Semantic Wikis for Patent Information", "abstract": "In this paper we present a new approach for using semantic wikis for collaborative patent search and annotation. We describe an extension that allows integrating interactive visualizations into semantic wikis for getting deeper insights into the classificatory, geographical, and temporal distribution of large patent sets. This approach differs from typical wiki usage scenarios in the sense that it combines automatic content generation based on patent search activities of the users with user driven semantic annotation of patent information, e.g. patent rating, linking with prior art, reviews, translations, discussions, etc. The content generation involves a semantic model that is described in terms of different ontologies for patent information. A modern wiki system is used for semantic annotation, comments, discussions, versioning, notification, and full-text search. Our approach is motivated by using available functionalities of a modern wiki system in combination with visualization techniques to directly implement major user requirements for supporting the knowledge-intensive tasks of patent search and understanding.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present a new approach for using semantic wikis for collaborative patent search and annotation. We describe an extension that allows integrating interactive visualizations into semantic wikis for getting deeper insights into the classificatory, geographical, and temporal distribution of large patent sets. 
This approach differs from typical wiki usage scenarios in the sense that it combines automatic content generation based on patent search activities of the users with user driven semantic annotation of patent information, e.g. patent rating, linking with prior art, reviews, translations, discussions, etc. The content generation involves a semantic model that is described in terms of different ontologies for patent information. A modern wiki system is used for semantic annotation, comments, discussions, versioning, notification, and full-text search. Our approach is motivated by using available functionalities of a modern wiki system in combination with visualization techniques to directly implement major user requirements for supporting the knowledge-intensive tasks of patent search and understanding.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present a new approach for using semantic wikis for collaborative patent search and annotation. We describe an extension that allows integrating interactive visualizations into semantic wikis for getting deeper insights into the classificatory, geographical, and temporal distribution of large patent sets. This approach differs from typical wiki usage scenarios in the sense that it combines automatic content generation based on patent search activities of the users with user driven semantic annotation of patent information, e.g. patent rating, linking with prior art, reviews, translations, discussions, etc. The content generation involves a semantic model that is described in terms of different ontologies for patent information. A modern wiki system is used for semantic annotation, comments, discussions, versioning, notification, and full-text search. 
Our approach is motivated by using available functionalities of a modern wiki system in combination with visualization techniques to directly implement major user requirements for supporting the knowledge-intensive tasks of patent search and understanding.", "fno": "3268a185", "keywords": [ "Information Visualization", "Semantic Wikis", "Patent Information" ], "authors": [ { "affiliation": null, "fullName": "Mark Giereth", "givenName": "Mark", "surname": "Giereth", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Thomas Ertl", "givenName": "Thomas", "surname": "Ertl", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-07-01T00:00:00", "pubType": "proceedings", "pages": "185-190", "year": "2008", "issn": "1550-6037", "isbn": "978-0-7695-3268-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3268a179", "articleId": "12OmNxWuilJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "3268a191", "articleId": "12OmNwGZNEq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wikis4se/2009/3742/0/05069992", "title": "Leveraging semantic data Wikis for distributed requirements elicitation", "doi": null, "abstractUrl": "/proceedings-article/wikis4se/2009/05069992/12OmNAS9zPt", "parentPublication": { "id": "proceedings/wikis4se/2009/3742/0", "title": "2009 ICSE Workshop on Wikis for Software Engineering (Wikis4SE 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2008/3496/3/3496c147", "title": "Semantic Wiki Where Human and Agents Collaborate", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2008/3496c147/12OmNAhOUNr", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/3", "title": "Web Intelligence and 
Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2011/4408/0/4408a280", "title": "Patent Maintenance Recommendation with Patent Information Network Model", "doi": null, "abstractUrl": "/proceedings-article/icdm/2011/4408a280/12OmNC0PGQI", "parentPublication": { "id": "proceedings/icdm/2011/4408/0", "title": "2011 IEEE 11th International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2013/5119/0/5119a206", "title": "The Democratization of Semantic Properties: An Analysis of Semantic Wikis", "doi": null, "abstractUrl": "/proceedings-article/icsc/2013/5119a206/12OmNC3FGkd", "parentPublication": { "id": "proceedings/icsc/2013/5119/0", "title": "2013 IEEE Seventh International Conference on Semantic Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/skg/2012/4794/0/4794a047", "title": "A Survey of RE-specific Wikis for Distributed Requirements Engineering", "doi": null, "abstractUrl": "/proceedings-article/skg/2012/4794a047/12OmNwF0BJr", "parentPublication": { "id": "proceedings/skg/2012/4794/0", "title": "Semantics, Knowledge and Grid, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2011/4596/0/4596a438", "title": "How to Reason by HeaRT in a Semantic Knowledge-Based Wiki", "doi": null, "abstractUrl": "/proceedings-article/ictai/2011/4596a438/12OmNx7ouTs", "parentPublication": { "id": "proceedings/ictai/2011/4596/0", "title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2008/3496/1/3496a419", "title": "Semantic Patent Clustering for Biomedical Communities", "doi": null, 
"abstractUrl": "/proceedings-article/wi-iat/2008/3496a419/12OmNynJMQ0", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/1", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2007/3026/0/30260435", "title": "Supporting Patent Mining by using Ontology-based Semantic Annotations", "doi": null, "abstractUrl": "/proceedings-article/wi/2007/30260435/12OmNzG4gv7", "parentPublication": { "id": "proceedings/wi/2007/3026/0", "title": "2007 IEEE/WIC/ACM International Conference on Web Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/semapro/2009/3833/0/3833a077", "title": "WiSyMon: Managing Systems Monitoring Information in Semantic Wikis", "doi": null, "abstractUrl": "/proceedings-article/semapro/2009/3833a077/12OmNzlUKgR", "parentPublication": { "id": "proceedings/semapro/2009/3833/0", "title": "Advances in Semantic Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2008/04/mso2008040008", "title": "Semantic Wikis", "doi": null, "abstractUrl": "/magazine/so/2008/04/mso2008040008/13rRUygBwcF", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBC8AAC", "title": "2015 International Conference on Service Science (ICSS)", "acronym": "icss", "groupId": "1800069", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNwcUk0z", "doi": "10.1109/ICSS.2015.12", "title": "Binary Patent Image Retrieval Using the Hierarchical Oriented Gradient Histogram", "normalizedTitle": "Binary Patent Image Retrieval Using the Hierarchical Oriented Gradient Histogram", "abstract": "To confirm the ingenuity of a patent, the drawings appeared in the patent document play a great role in the comparison of similar patent and can further combine with text-based image retrieval for accurate search. Considerable work has been done in image retrieval using shape, color and texture information. However, patent images are usually binary with complex shapes, no color and little texture information, thus little effort has been made specifically for patents. In this paper, we proposed a novel method named the hierarchical oriented gradient histogram, which extracts the local and global gradient distribution of the image. It can be used in binary patent images which are very complex and cannot be easily segmented into shapes. Experiments on a public database demonstrated that the proposed algorithm could get higher accuracy than other state-of-the-art approaches. Because the dimension of feature of an image is less than 200, our method can be utilized for patent image retrieval in real-time.", "abstracts": [ { "abstractType": "Regular", "content": "To confirm the ingenuity of a patent, the drawings appeared in the patent document play a great role in the comparison of similar patent and can further combine with text-based image retrieval for accurate search. Considerable work has been done in image retrieval using shape, color and texture information. 
However, patent images are usually binary with complex shapes, no color and little texture information, thus little effort has been made specifically for patents. In this paper, we proposed a novel method named the hierarchical oriented gradient histogram, which extracts the local and global gradient distribution of the image. It can be used in binary patent images which are very complex and cannot be easily segmented into shapes. Experiments on a public database demonstrated that the proposed algorithm could get higher accuracy than other state-of-the-art approaches. Because the dimension of feature of an image is less than 200, our method can be utilized for patent image retrieval in real-time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To confirm the ingenuity of a patent, the drawings appeared in the patent document play a great role in the comparison of similar patent and can further combine with text-based image retrieval for accurate search. Considerable work has been done in image retrieval using shape, color and texture information. However, patent images are usually binary with complex shapes, no color and little texture information, thus little effort has been made specifically for patents. In this paper, we proposed a novel method named the hierarchical oriented gradient histogram, which extracts the local and global gradient distribution of the image. It can be used in binary patent images which are very complex and cannot be easily segmented into shapes. Experiments on a public database demonstrated that the proposed algorithm could get higher accuracy than other state-of-the-art approaches. 
Because the dimension of feature of an image is less than 200, our method can be utilized for patent image retrieval in real-time.", "fno": "9947a023", "keywords": [ "Patents", "Feature Extraction", "Image Retrieval", "Histograms", "Shape", "Image Color Analysis", "Hierarchical", "Patent", "Binary", "Image Retrieval", "Gradient" ], "authors": [ { "affiliation": null, "fullName": "Hui Ni", "givenName": "Hui", "surname": "Ni", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhenhua Guo", "givenName": "Zhenhua", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Biqing Huang", "givenName": "Biqing", "surname": "Huang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icss", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-05-01T00:00:00", "pubType": "proceedings", "pages": "23-27", "year": "2015", "issn": "2165-3836", "isbn": "978-1-4799-9947-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9947a017", "articleId": "12OmNCcKQti", "__typename": "AdjacentArticleType" }, "next": { "fno": "9947a028", "articleId": "12OmNCeK2fH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icss/2015/9947/0/9947a028", "title": "Patent Image Classification Using Local-Constrained Linear Coding and Spatial Pyramid Matching", "doi": null, "abstractUrl": "/proceedings-article/icss/2015/9947a028/12OmNCeK2fH", "parentPublication": { "id": "proceedings/icss/2015/9947/0", "title": "2015 International Conference on Service Science (ICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ciis/2017/3886/0/3886a072", "title": "Content-Based Image Retrieval Based on Multi-feature Fusion Optimized by Brain Storm Optimization", "doi": null, "abstractUrl": 
"/proceedings-article/ciis/2017/3886a072/12OmNx76TTE", "parentPublication": { "id": "proceedings/ciis/2017/3886/0", "title": "2017 International Conference on Computing Intelligence and Information System (CIIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvisp/2017/0612/0/0612a082", "title": "Multilevel Haar Wavelet Transform and Histogram Usage in Content Based Image Retrieval System", "doi": null, "abstractUrl": "/proceedings-article/icvisp/2017/0612a082/12OmNxWcH3y", "parentPublication": { "id": "proceedings/icvisp/2017/0612/0", "title": "2017 International Conference on Vision, Image and Signal Processing (ICVISP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgciot/2015/7910/0/07380556", "title": "Image retrieval using color and texture binary patterns", "doi": null, "abstractUrl": "/proceedings-article/icgciot/2015/07380556/12OmNy5zspY", "parentPublication": { "id": "proceedings/icgciot/2015/7910/0", "title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2016/0806/0/07550786", "title": "Minority costume image retrieval by fusion of color histogram and edge orientation histogram", "doi": null, "abstractUrl": "/proceedings-article/icis/2016/07550786/12OmNyen1lq", "parentPublication": { "id": "proceedings/icis/2016/0806/0", "title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fit/2013/2503/0/2293a107", "title": "Content Based Image Retrieval Using Localized Multi-texton Histogram", "doi": null, "abstractUrl": "/proceedings-article/fit/2013/2293a107/12OmNzDvSgS", "parentPublication": { "id": "proceedings/fit/2013/2503/0", "title": "2013 11th International 
Conference on Frontiers of Information Technology (FIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a557", "title": "DeepPatent: Large scale patent drawing recognition and retrieval", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500a557/1B13pAYzp72", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150858", "title": "Diagram Image Retrieval using Sketch-Based Deep Learning and Transfer Learning", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150858/1lPH2QZwIZW", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150994", "title": "Diagram Image Retrieval and Analysis: Challenges and Opportunities", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150994/1lPHtn4c6FW", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itca/2020/0378/0/037800a481", "title": "Image Based Design Patent Retrieval with Classification and Indexing", "doi": null, "abstractUrl": "/proceedings-article/itca/2020/037800a481/1tpBbXPgWqY", "parentPublication": { "id": "proceedings/itca/2020/0378/0", "title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqJq4iw", "title": "2016 International Conference on Big Data and Smart Computing (BigComp)", "acronym": "bigcomp", "groupId": "1803439", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNyXMQc8", "doi": "10.1109/BIGCOMP.2016.7425808", "title": "Retrieving patents with inverse patent category frequency", "normalizedTitle": "Retrieving patents with inverse patent category frequency", "abstract": "Patent has currently been captured strong attention as a key enabler for the knowledge and information centric companies and institutes. The higher the patent capability required, the more important an effective and efficient patent retrieval system needed. The conventional patent retrieval systems, however, have produced unsatisfactory results for the patent queries, since the inherent search systems would have come from the traditional keyword based models so that it has been inevitable to result in too many unrelated items. This has made the patent experts keep spending a lot of time to refine the results manually. We propose two dynamic ranking algorithms specialized patent-searching method, in which the dynamic interactive retrieval can be achieved. In the real USPTO dataset experiment, the dynamic ranking method shows substantial improvements with respect to time and cost over conventional static ranking approaches.", "abstracts": [ { "abstractType": "Regular", "content": "Patent has currently been captured strong attention as a key enabler for the knowledge and information centric companies and institutes. The higher the patent capability required, the more important an effective and efficient patent retrieval system needed. 
The conventional patent retrieval systems, however, have produced unsatisfactory results for the patent queries, since the inherent search systems would have come from the traditional keyword based models so that it has been inevitable to result in too many unrelated items. This has made the patent experts keep spending a lot of time to refine the results manually. We propose two dynamic ranking algorithms specialized patent-searching method, in which the dynamic interactive retrieval can be achieved. In the real USPTO dataset experiment, the dynamic ranking method shows substantial improvements with respect to time and cost over conventional static ranking approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Patent has currently been captured strong attention as a key enabler for the knowledge and information centric companies and institutes. The higher the patent capability required, the more important an effective and efficient patent retrieval system needed. The conventional patent retrieval systems, however, have produced unsatisfactory results for the patent queries, since the inherent search systems would have come from the traditional keyword based models so that it has been inevitable to result in too many unrelated items. This has made the patent experts keep spending a lot of time to refine the results manually. We propose two dynamic ranking algorithms specialized patent-searching method, in which the dynamic interactive retrieval can be achieved. 
In the real USPTO dataset experiment, the dynamic ranking method shows substantial improvements with respect to time and cost over conventional static ranking approaches.", "fno": "07425808", "keywords": [ "Patents", "Heuristic Algorithms", "Databases", "Search Problems", "Law", "Tagging" ], "authors": [ { "affiliation": "Department of Industrial Engineering, Inha University, Incheon, South Korea", "fullName": "Justin JongSu Song", "givenName": null, "surname": "Justin JongSu Song", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Industrial Engineering, Inha University, Incheon, South Korea", "fullName": "Wookey Lee", "givenName": null, "surname": "Wookey Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Industrial Engineering, Inha University, Incheon, South Korea", "fullName": "Jafar Afshar", "givenName": "Jafar", "surname": "Afshar", "__typename": "ArticleAuthorType" } ], "idPrefix": "bigcomp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-01-01T00:00:00", "pubType": "proceedings", "pages": "109-114", "year": "2016", "issn": "2375-9356", "isbn": "978-1-4673-8796-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07425807", "articleId": "12OmNCctf7R", "__typename": "AdjacentArticleType" }, "next": { "fno": "07425809", "articleId": "12OmNvJXeCw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/navcomp/2013/5123/0/5123a040", "title": "Technology Mapping of the Underwater Sensor Networks by Patent Documents Using Two Commercial Software", "doi": null, "abstractUrl": "/proceedings-article/navcomp/2013/5123a040/12OmNBgQFQn", "parentPublication": { "id": "proceedings/navcomp/2013/5123/0", "title": "2013 Symposium on Computer and Automation for Offshore Shipbuilding (NAVCOMP)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2015/9504/0/9504a751", "title": "Patent Citation Recommendation for Examiners", "doi": null, "abstractUrl": "/proceedings-article/icdm/2015/9504a751/12OmNwDj1hy", "parentPublication": { "id": "proceedings/icdm/2015/9504/0", "title": "2015 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kam/2011/1788/0/06137571", "title": "Competitive Technical Intelligence Analysis Based on Patents Coupling", "doi": null, "abstractUrl": "/proceedings-article/kam/2011/06137571/12OmNx0RIXr", "parentPublication": { "id": "proceedings/kam/2011/1788/0", "title": "2011 Fourth International Symposium on Knowledge Acquisition and Modeling", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdcloud/2014/6719/0/6719a579", "title": "Reducing Noises for Recall-Oriented Patent Retrieval", "doi": null, "abstractUrl": "/proceedings-article/bdcloud/2014/6719a579/12OmNxwENNd", "parentPublication": { "id": "proceedings/bdcloud/2014/6719/0", "title": "2014 IEEE International Conference on Big Data and Cloud Computing (BdCloud)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2015/7367/0/7367c096", "title": "An Ontology-Based Approach for Retrieving Information from Disparate Sectors in Government: The Patent System as an Exemplar", "doi": null, "abstractUrl": "/proceedings-article/hicss/2015/7367c096/12OmNyRxFnn", "parentPublication": { "id": "proceedings/hicss/2015/7367/0", "title": "2015 48th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2016/4229/0/07559618", "title": "Inventor name disambiguation for a patent database using a random forest and DBSCAN", "doi": null, "abstractUrl": 
"/proceedings-article/jcdl/2016/07559618/12OmNzCWG69", "parentPublication": { "id": "proceedings/jcdl/2016/4229/0", "title": "2016 IEEE/ACM Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbs/2019/1307/0/130700a241", "title": "Artificial Intelligence Technology Challenges Patent Laws", "doi": null, "abstractUrl": "/proceedings-article/icitbs/2019/130700a241/18AuVBatu0M", "parentPublication": { "id": "proceedings/icitbs/2019/1307/0", "title": "2019 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdeim/2021/8288/0/828800a419", "title": "Analysis and Statistics on Patent Information of Colleges and Universities based on PatSnap: Take Wuhan University of Technology as an Example", "doi": null, "abstractUrl": "/proceedings-article/bdeim/2021/828800a419/1B4miVFw1cA", "parentPublication": { "id": "proceedings/bdeim/2021/8288/0", "title": "2021 2nd International Conference on Big Data Economy and Information Management (BDEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2020/6034/0/603400a548", "title": "Prior Art Search Using Multi-modal Embedding of Patent Documents", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2020/603400a548/1jdDulnuKGY", "parentPublication": { "id": "proceedings/bigcomp/2020/6034/0", "title": "2020 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2020/6034/0/603400a558", "title": "Multi-label Patent Classification using Attention-Aware Deep Learning Model", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2020/603400a558/1jdDvjJf5jq", "parentPublication": { "id": 
"proceedings/bigcomp/2020/6034/0", "title": "2020 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCwUmAi", "title": "Proceedings of the Fourth ACM/IEEE Joint Conference on Digital Libraries", "acronym": "jcdl", "groupId": "1804605", "volume": "0", "displayVolume": "0", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNzXnNum", "doi": "10.1109/JCDL.2004.240483", "title": "Coupling browse and search in highly interactive user interfaces: a study of the relation browser++", "normalizedTitle": "Coupling browse and search in highly interactive user interfaces: a study of the relation browser++", "abstract": "The size and breadth of digital libraries makes it difficult for people to quickly grasp what content is and is not available. Consequently, people usually need an overview of the digital library to help them decide if it is worthwhile to look further. As they do look further, it is helpful for their searching and browsing to get an idea of what is in the collection and how many items are available. The relational browser++(RB++) is a dynamic interface for large information collections. Not only does it allow users to search but also provides an overview of the collection/search results organized in an interactive category structure. It has been applied to two dozen different kinds of collections and this study used it with a film library collection with more than 10000 records.", "abstracts": [ { "abstractType": "Regular", "content": "The size and breadth of digital libraries makes it difficult for people to quickly grasp what content is and is not available. Consequently, people usually need an overview of the digital library to help them decide if it is worthwhile to look further. As they do look further, it is helpful for their searching and browsing to get an idea of what is in the collection and how many items are available. The relational browser++(RB++) is a dynamic interface for large information collections. 
Not only does it allow users to search but also provides an overview of the collection/search results organized in an interactive category structure. It has been applied to two dozen different kinds of collections and this study used it with a film library collection with more than 10000 records.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The size and breadth of digital libraries makes it difficult for people to quickly grasp what content is and is not available. Consequently, people usually need an overview of the digital library to help them decide if it is worthwhile to look further. As they do look further, it is helpful for their searching and browsing to get an idea of what is in the collection and how many items are available. The relational browser++(RB++) is a dynamic interface for large information collections. Not only does it allow users to search but also provides an overview of the collection/search results organized in an interactive category structure. It has been applied to two dozen different kinds of collections and this study used it with a film library collection with more than 10000 records.", "fno": "01336163", "keywords": [ "User Interfaces", "Software Libraries", "Bars", "Information Retrieval", "Graphical User Interfaces", "Visualization", "Human Computer Interaction", "Human Factors", "Mice", "Filters" ], "authors": [ { "affiliation": "Interaction Design Lab. North Carolina Univ., Chapel Hill, NC, USA", "fullName": "Junliang Zhang", "givenName": null, "surname": "Junliang Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Interaction Design Lab. North Carolina Univ., Chapel Hill, NC, USA", "fullName": "G. 
Marchionini", "givenName": "G.", "surname": "Marchionini", "__typename": "ArticleAuthorType" } ], "idPrefix": "jcdl", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "384", "year": "2004", "issn": null, "isbn": "1-58113-832-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01336162", "articleId": "12OmNxdDFH6", "__typename": "AdjacentArticleType" }, "next": { "fno": "01336164", "articleId": "12OmNAq3hGR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/jcdl/2004/832/0/01336144", "title": "Collection understanding [visualization tools in information retrieval]", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2004/01336144/12OmNBtl1rf", "parentPublication": { "id": "proceedings/jcdl/2004/832/0", "title": "Proceedings of the Fourth ACM/IEEE Joint Conference on Digital Libraries", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2016/2722/0/07590357", "title": "Pheromander: Real-Time Strategy with Digital Pheromones", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2016/07590357/12OmNCbCrY1", "parentPublication": { "id": "proceedings/vs-games/2016/2722/0", "title": "2016 8th International Conference on Games and Virtual Worlds for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciw/2008/3163/0/3163a121", "title": "Graphical History List with Multi-window Support on a Mobile Web Browser", "doi": null, "abstractUrl": "/proceedings-article/iciw/2008/3163a121/12OmNrK9q2F", "parentPublication": { "id": "proceedings/iciw/2008/3163/0", "title": "Internet and Web Applications and Services, International Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2011/0868/0/06004016", "title": "ImageCube: A Browser for Image Collections Associated with Multi-dimensional Datasets", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004016/12OmNweBUCq", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcse/2009/3570/1/3570a244", "title": "Hyperbolic and Bifocal Browser: Web and File Browser", "doi": null, "abstractUrl": "/proceedings-article/wcse/2009/3570a244/12OmNxaNGp8", "parentPublication": { "id": "proceedings/wcse/2009/3570/1", "title": "2009 WRI World Congress on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2015/1885/0/07107468", "title": "Test automation for multi-touch user interfaces of industrial applications", "doi": null, "abstractUrl": "/proceedings-article/icstw/2015/07107468/12OmNymjN0W", "parentPublication": { "id": "proceedings/icstw/2015/1885/0", "title": "2015 IEEE Eighth International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06012203", "title": "The amblr: A mobile spatial audio music browser", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012203/12OmNynJMPN", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770639", "title": "ORRIL: A Simple Building Blocks Approach to Zoomable User Interfaces", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770639/12OmNzC5STU", 
"parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2006/03/u3022", "title": "Guest Editors' Introduction: Haptic User Interfaces for Multimedia Systems", "doi": null, "abstractUrl": "/magazine/mu/2006/03/u3022/13rRUILLkAy", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2017/10/mco2017100016", "title": "On-Skin Interfaces", "doi": null, "abstractUrl": "/magazine/co/2017/10/mco2017100016/13rRUygT7dO", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKir9", "title": "2018 22nd International Conference Information Visualisation (IV)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45VTRow5", "doi": "10.1109/iV.2018.00059", "title": "A Visual Analytics GUI for Multigranular Spatio-Temporal Exploration and Comparison of Open Mobility Data", "normalizedTitle": "A Visual Analytics GUI for Multigranular Spatio-Temporal Exploration and Comparison of Open Mobility Data", "abstract": "Recent technological developments in the fields of positioning and mobile communications gave rise to the availabilityof massive spatio-temporal open datasets about cities. A proper exploitation of these big datasets by decision makers of smart cities could be very useful to analyse and understand mobility patterns, with the final goal of easing many transportation problems, like parking search and traffic. While many research efforts have been aimed at defining powerful visual analytics tools for exploring vehicular trajectory data, to date almost no specifically tailored tools are available to analyse (on-street) parking data and dynamics. To fill this gap, in this paper we present the current state of an on-going research on the development of a visual analytics tool, meant to support decision makers of smart cities in performing multigranular spatio-temporal explorations of mobility open data, like those about parking. Moreover, the proposed GUI offers the possibility to overlay external spatio-temporal datasets as well as to customize the way this data is rendered, to get a better insight on the parking dynamics and its influencing factors.", "abstracts": [ { "abstractType": "Regular", "content": "Recent technological developments in the fields of positioning and mobile communications gave rise to the availabilityof massive spatio-temporal open datasets about cities. 
A proper exploitation of these big datasets by decision makers of smart cities could be very useful to analyse and understand mobility patterns, with the final goal of easing many transportation problems, like parking search and traffic. While many research efforts have been aimed at defining powerful visual analytics tools for exploring vehicular trajectory data, to date almost no specifically tailored tools are available to analyse (on-street) parking data and dynamics. To fill this gap, in this paper we present the current state of an on-going research on the development of a visual analytics tool, meant to support decision makers of smart cities in performing multigranular spatio-temporal explorations of mobility open data, like those about parking. Moreover, the proposed GUI offers the possibility to overlay external spatio-temporal datasets as well as to customize the way this data is rendered, to get a better insight on the parking dynamics and its influencing factors.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent technological developments in the fields of positioning and mobile communications gave rise to the availabilityof massive spatio-temporal open datasets about cities. A proper exploitation of these big datasets by decision makers of smart cities could be very useful to analyse and understand mobility patterns, with the final goal of easing many transportation problems, like parking search and traffic. While many research efforts have been aimed at defining powerful visual analytics tools for exploring vehicular trajectory data, to date almost no specifically tailored tools are available to analyse (on-street) parking data and dynamics. To fill this gap, in this paper we present the current state of an on-going research on the development of a visual analytics tool, meant to support decision makers of smart cities in performing multigranular spatio-temporal explorations of mobility open data, like those about parking. 
Moreover, the proposed GUI offers the possibility to overlay external spatio-temporal datasets as well as to customize the way this data is rendered, to get a better insight on the parking dynamics and its influencing factors.", "fno": "720200a309", "keywords": [ "Big Data", "Data Analysis", "Data Visualisation", "Graphical User Interfaces", "Mobile Radio", "Road Traffic", "Traffic Engineering Computing", "Smart Cities", "Multigranular Spatio Temporal Exploration", "Mobility Open Data", "Parking Dynamics", "Visual Analytics GUI", "Open Mobility Data", "Mobile Communications", "Big Datasets", "Vehicular Trajectory Data", "Visual Analytics Tool", "Parking Data Analysis", "Data Visualization", "Graphical User Interfaces", "Tools", "Bars", "Visual Analytics", "Smart Cities", "Roads", "Visual Analitycs", "Smart Cities", "Open Data", "Mobility", "Spatio Temporal Data" ], "authors": [ { "affiliation": null, "fullName": "Camilla Robino", "givenName": "Camilla", "surname": "Robino", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Laura Di Rocco", "givenName": "Laura", "surname": "Di Rocco", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sergio Di Martino", "givenName": "Sergio", "surname": "Di Martino", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Giovanna Guerrini", "givenName": "Giovanna", "surname": "Guerrini", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Michela Bertolotto", "givenName": "Michela", "surname": "Bertolotto", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-07-01T00:00:00", "pubType": "proceedings", "pages": "309-314", "year": "2018", "issn": null, "isbn": "978-1-5386-7202-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "720200a303", "articleId": 
"17D45XdBRSA", "__typename": "AdjacentArticleType" }, "next": { "fno": "720200a315", "articleId": "17D45WnnFWN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sasow/2015/8439/0/8439a074", "title": "Exploring Spatio-temporal Properties of Bike-Sharing Systems", "doi": null, "abstractUrl": "/proceedings-article/sasow/2015/8439a074/12OmNrMHOch", "parentPublication": { "id": "proceedings/sasow/2015/8439/0", "title": "2015 IEEE International Conference on Self-Adaptive and Self-Organizing Systems Workshops (SASOW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2006/0591/0/04035742", "title": "Visual Exploration of Spatio-temporal Relationships for Scientific Data", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035742/12OmNwGIcB0", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mdm/2018/4133/0/413301a278", "title": "aSTEP: Aau's Spatio-TEmporal Data Analytics Platform", "doi": null, "abstractUrl": "/proceedings-article/mdm/2018/413301a278/12OmNxvO04f", "parentPublication": { "id": "proceedings/mdm/2018/4133/0", "title": "2018 19th IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2012/2049/0/06266320", "title": "Dealing with multigranular spatio-temporal databases to manage psychiatric epidemiology data", "doi": null, "abstractUrl": "/proceedings-article/cbms/2012/06266320/12OmNz3bdFs", "parentPublication": { "id": "proceedings/cbms/2012/2049/0", "title": "2012 25th IEEE International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "mags/cg/2018/05/mcg2018050026", "title": "Spatio-Temporal Urban Data Analysis: A Visual Analytics Perspective", "doi": null, "abstractUrl": "/magazine/cg/2018/05/mcg2018050026/13WBGTItFGV", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440804", "title": "ForVizor: Visualizing Spatio-Temporal Team Formations in Soccer", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440804/17D45WXIkAs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09903281", "title": "A Visual Analytics System for Improving Attention-based Traffic Forecasting Models", "doi": null, "abstractUrl": "/journal/tg/2023/01/09903281/1GZolp3W1mE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2022/5099/0/509900a001", "title": "STORM-GAN: Spatio-Temporal Meta-GAN for Cross-City Estimation of Human Mobility Responses to COVID-19", "doi": null, "abstractUrl": "/proceedings-article/icdm/2022/509900a001/1KpCiFkLVF6", "parentPublication": { "id": "proceedings/icdm/2022/5099/0", "title": "2022 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2019/2284/0/08986925", "title": "Visual Analytics of Spatio-Temporal Uncertainties for Radiation Monitoring in a Nuclear Leakage Crisis", "doi": null, "abstractUrl": "/proceedings-article/vast/2019/08986925/1hrMAgcwJ2g", "parentPublication": { "id": "proceedings/vast/2019/2284/0", "title": "2019 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaice/2020/9146/0/914600a234", "title": "Spatio-temporal Clustering-based Parking Area Division of Dockless Shared Bicycles", "doi": null, "abstractUrl": "/proceedings-article/icaice/2020/914600a234/1rCga3Pvaz6", "parentPublication": { "id": "proceedings/icaice/2020/9146/0", "title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1kaMxDONP0Y", "title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)", "acronym": "icde", "groupId": "1000178", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1kaMAjbz5fi", "doi": "10.1109/ICDE48307.2020.00156", "title": "vCBIR: A Verifiable Search Engine for Content-Based Image Retrieval", "normalizedTitle": "vCBIR: A Verifiable Search Engine for Content-Based Image Retrieval", "abstract": "We demonstrate vCBIR, a verifiable search engine for Content-Based Image Retrieval. vCBIR allows a small or medium-sized enterprise to outsource its image database to a cloud-based service provider and ensures the integrity of query processing. Like other common data-as-a-service (DaaS) systems, vCBIR consists of three parties: (i) the image owner who outsources its database, (ii) the service provider who executes the authenticated query processing, and (iii) the client who issues search queries. By employing a novel query authentication scheme proposed in our prior work [4], the system not only supports cloud-based image retrieval, but also generates a cryptographic proof for each query, by which the client could verify the integrity of query results. During the demonstration, we will showcase the usage of vCBIR and also provide attendees interactive experience of verifying query results against an untrustworthy service provider through graphical user interface (GUI).", "abstracts": [ { "abstractType": "Regular", "content": "We demonstrate vCBIR, a verifiable search engine for Content-Based Image Retrieval. vCBIR allows a small or medium-sized enterprise to outsource its image database to a cloud-based service provider and ensures the integrity of query processing. 
Like other common data-as-a-service (DaaS) systems, vCBIR consists of three parties: (i) the image owner who outsources its database, (ii) the service provider who executes the authenticated query processing, and (iii) the client who issues search queries. By employing a novel query authentication scheme proposed in our prior work [4], the system not only supports cloud-based image retrieval, but also generates a cryptographic proof for each query, by which the client could verify the integrity of query results. During the demonstration, we will showcase the usage of vCBIR and also provide attendees interactive experience of verifying query results against an untrustworthy service provider through graphical user interface (GUI).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We demonstrate vCBIR, a verifiable search engine for Content-Based Image Retrieval. vCBIR allows a small or medium-sized enterprise to outsource its image database to a cloud-based service provider and ensures the integrity of query processing. Like other common data-as-a-service (DaaS) systems, vCBIR consists of three parties: (i) the image owner who outsources its database, (ii) the service provider who executes the authenticated query processing, and (iii) the client who issues search queries. By employing a novel query authentication scheme proposed in our prior work [4], the system not only supports cloud-based image retrieval, but also generates a cryptographic proof for each query, by which the client could verify the integrity of query results. 
During the demonstration, we will showcase the usage of vCBIR and also provide attendees interactive experience of verifying query results against an untrustworthy service provider through graphical user interface (GUI).", "fno": "09101819", "keywords": [ "Authorisation", "Cloud Computing", "Cryptography", "Data Integrity", "Formal Verification", "Graphical User Interfaces", "Image Retrieval", "Outsourcing", "Search Engines", "Small To Medium Enterprises", "User Experience", "Visual Databases", "Web Services", "Content Based Image Retrieval", "V CBIR", "Image Database", "Cloud Based Service Provider", "Authenticated Query Processing", "Search Queries", "Query Authentication", "Cloud Based Image Retrieval", "Untrustworthy Service Provider", "Verifiable Search Engine", "Small Or Medium Sized Enterprise", "Data As A Service Systems", "Daa S Systems", "Graphical User Interface", "Cryptographic Proof", "Interactive Experience", "Feature Extraction", "Graphical User Interfaces", "Query Processing", "Indexes", "Image Retrieval", "Browsers" ], "authors": [ { "affiliation": "Hong Kong Baptist University,Department of Computer Science,Hong Kong, China", "fullName": "Shangwei Guo", "givenName": "Shangwei", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": "Hong Kong Baptist University,Department of Computer Science,Hong Kong, China", "fullName": "Yang Ji", "givenName": "Yang", "surname": "Ji", "__typename": "ArticleAuthorType" }, { "affiliation": "Hong Kong Baptist University,Department of Computer Science,Hong Kong, China", "fullName": "Ce Zhang", "givenName": "Ce", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Hong Kong Baptist University,Department of Computer Science,Hong Kong, China", "fullName": "Cheng Xu", "givenName": "Cheng", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Hong Kong Baptist University,Department of Computer Science,Hong Kong, China", "fullName": "Jianliang Xu", "givenName": 
"Jianliang", "surname": "Xu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icde", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-04-01T00:00:00", "pubType": "proceedings", "pages": "1730-1733", "year": "2020", "issn": null, "isbn": "978-1-7281-2903-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09101776", "articleId": "1kaMOO9c1zi", "__typename": "AdjacentArticleType" }, "next": { "fno": "09101471", "articleId": "1kaMHLr2Obe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sisap/2009/3765/0/3765a151", "title": "Img(Rummager): An Interactive Content Based Image Retrieval System", "doi": null, "abstractUrl": "/proceedings-article/sisap/2009/3765a151/12OmNBt3qhT", "parentPublication": { "id": "proceedings/sisap/2009/3765/0", "title": "Similarity Search and Applications, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smap/2009/3894/0/3894a056", "title": "Towards Interactive Image Query System for Content-Based Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/smap/2009/3894a056/12OmNvlxJtC", "parentPublication": { "id": "proceedings/smap/2009/3894/0", "title": "Semantic Media Adaptation and Personalization, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dexa/2009/3763/0/3763a236", "title": "Content-Based Image Retrieval Using Gabor Filtering", "doi": null, "abstractUrl": "/proceedings-article/dexa/2009/3763a236/12OmNwDj18M", "parentPublication": { "id": "proceedings/dexa/2009/3763/0", "title": "2009 20th International Workshop on Database and Expert Systems Application. 
DEXA 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nbis/2016/0979/0/0979a342", "title": "GPU Acceleration of Content-Based Image Retrieval Based on SIFT Descriptors", "doi": null, "abstractUrl": "/proceedings-article/nbis/2016/0979a342/12OmNxvO04a", "parentPublication": { "id": "proceedings/nbis/2016/0979/0", "title": "2016 19th International Conference on Network-Based Information Systems (NBiS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/4/3336g593", "title": "Tolerant Retrieval and Query Processing in Search Engine", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336g593/12OmNy5zswA", "parentPublication": { "id": "csse/2008/3336/4", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccima/1999/0300/0/03000181", "title": "A Content-Based Video Query Agent Using Feature-Based Image Search Engine", "doi": null, "abstractUrl": "/proceedings-article/iccima/1999/03000181/12OmNzC5Tcm", "parentPublication": { "id": "proceedings/iccima/1999/0300/0", "title": "Computational Intelligence and Multimedia Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uksim/2012/4682/0/4682a283", "title": "Image Query Based Search Engine Using Image Content Retrieval", "doi": null, "abstractUrl": "/proceedings-article/uksim/2012/4682a283/12OmNzXnNuV", "parentPublication": { "id": "proceedings/uksim/2012/4682/0", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/1997/8105/0/81050569", "title": "A content-based search engine on medical images for telemedicine", "doi": null, "abstractUrl": 
"/proceedings-article/compsac/1997/81050569/12OmNzyp5ZR", "parentPublication": { "id": "proceedings/compsac/1997/8105/0", "title": "Proceedings Twenty-First Annual International Computer Software and Applications Conference (COMPSAC'97)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2011/03/ttk2011030360", "title": "Efficient Relevance Feedback for Content-Based Image Retrieval by Mining User Navigation Patterns", "doi": null, "abstractUrl": "/journal/tk/2011/03/ttk2011030360/13rRUwghd5u", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2019/7474/0/747400c028", "title": "vABS: Towards Verifiable Attribute-Based Search Over Shared Cloud Data", "doi": null, "abstractUrl": "/proceedings-article/icde/2019/747400c028/1aDT0ORwsgw", "parentPublication": { "id": "proceedings/icde/2019/7474/0", "title": "2019 IEEE 35th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBfZSj8", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "acronym": "vast", "groupId": "1001630", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNwlqhST", "doi": "10.1109/VAST.2012.6400529", "title": "VAST Challenge 2012: Visual analytics for big data", "normalizedTitle": "VAST Challenge 2012: Visual analytics for big data", "abstract": "The 2012 Visual Analytics Science and Technology (VAST) Challenge posed two challenge problems for participants to solve using a combination of visual analytics software and their own analytic reasoning abilities. Challenge 1 (C1) involved visualizing the network health of the fictitious Bank of Money to provide situation awareness and identify emerging trends that could signify network issues. Challenge 2 (C2) involved identifying the issues of concern within a region of the Bank of Money network experiencing operational difficulties utilizing the provided network logs. Participants were asked to analyze the data and provide solutions and explanations for both challenges. The data sets were downloaded by nearly 1100 people by the close of submissions. The VAST Challenge received 40 submissions with participants from 12 different countries, and 14 awards were given.", "abstracts": [ { "abstractType": "Regular", "content": "The 2012 Visual Analytics Science and Technology (VAST) Challenge posed two challenge problems for participants to solve using a combination of visual analytics software and their own analytic reasoning abilities. Challenge 1 (C1) involved visualizing the network health of the fictitious Bank of Money to provide situation awareness and identify emerging trends that could signify network issues. Challenge 2 (C2) involved identifying the issues of concern within a region of the Bank of Money network experiencing operational difficulties utilizing the provided network logs. 
Participants were asked to analyze the data and provide solutions and explanations for both challenges. The data sets were downloaded by nearly 1100 people by the close of submissions. The VAST Challenge received 40 submissions with participants from 12 different countries, and 14 awards were given.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The 2012 Visual Analytics Science and Technology (VAST) Challenge posed two challenge problems for participants to solve using a combination of visual analytics software and their own analytic reasoning abilities. Challenge 1 (C1) involved visualizing the network health of the fictitious Bank of Money to provide situation awareness and identify emerging trends that could signify network issues. Challenge 2 (C2) involved identifying the issues of concern within a region of the Bank of Money network experiencing operational difficulties utilizing the provided network logs. Participants were asked to analyze the data and provide solutions and explanations for both challenges. The data sets were downloaded by nearly 1100 people by the close of submissions. 
The VAST Challenge received 40 submissions with participants from 12 different countries, and 14 awards were given.", "fno": "06400529", "keywords": [ "Contest", "Visual Analytics", "Human Information Interaction", "Sense Making", "Evaluation", "Metrics" ], "authors": [ { "affiliation": "Air Force Research Laboratory", "fullName": "Kristen Liggett", "givenName": "Kristen", "surname": "Liggett", "__typename": "ArticleAuthorType" }, { "affiliation": "Air Force Research Laboratory", "fullName": "Paul Havig", "givenName": "Paul", "surname": "Havig", "__typename": "ArticleAuthorType" }, { "affiliation": "Pacific Northwest National Laboratory", "fullName": "Michael Cooper", "givenName": "Michael", "surname": "Cooper", "__typename": "ArticleAuthorType" }, { "affiliation": "Pacific Northwest National Laboratory", "fullName": "Mark Whiting", "givenName": "Mark", "surname": "Whiting", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Massachusetts Lowell", "fullName": "Georges Grinstein", "givenName": "Georges", "surname": "Grinstein", "__typename": "ArticleAuthorType" }, { "affiliation": "Pacific Northwest National Laboratory", "fullName": "Kristin Cook", "givenName": "Kristin", "surname": "Cook", "__typename": "ArticleAuthorType" }, { "affiliation": "National Security Agency", "fullName": "Bohdan Nebesh", "givenName": "Bohdan", "surname": "Nebesh", "__typename": "ArticleAuthorType" }, { "affiliation": "National Security Agency", "fullName": "Celeste Lyn Paul", "givenName": "Celeste Lyn", "surname": "Paul", "__typename": "ArticleAuthorType" } ], "idPrefix": "vast", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-10-01T00:00:00", "pubType": "proceedings", "pages": "251-255", "year": "2012", "issn": null, "isbn": "978-1-4673-4752-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06400526", "articleId": "12OmNyuya6G", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "06400530", "articleId": "12OmNvStcyS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vast/2015/9783/0/07347674", "title": "A software developer's guide to informal evaluation of Visual Analytics environments using VAST Challenge information", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347674/12OmNBpmDMF", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2006/0591/0/04035768", "title": "VAST 2006 Contest ? A Tale of Alderwood", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035768/12OmNrMZpG6", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2014/6227/0/07042536", "title": "VAST challenge 2014: The Kronos incident", "doi": null, "abstractUrl": "/proceedings-article/vast/2014/07042536/12OmNscOUhM", "parentPublication": { "id": "proceedings/vast/2014/6227/0", "title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2012/4752/0/06400519", "title": "Enhancing the “think loop process” with consistent interactions: VAST 2012 Mini Challenge 1 award: Honorable mention for good interaction techniques", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400519/12OmNvmG7Uf", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vast/2015/9783/0/07347638", "title": "VAST Challenge 2015: Mayhem at dinofun world", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347638/12OmNvs4vol", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2012/4752/0/06400508", "title": "VAST Challenge 2012: Interactively finding anomalies in geo-temporal multivariate data", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400508/12OmNwMXnoH", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2012/4752/0/06400511", "title": "3D anomaly bar visualization for large-scale network: VAST 2012 Mini Challenge #1", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400511/12OmNy2agPP", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585503", "title": "VAST Challenge 2017: Mystery at the Wildlife Preserve", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585503/17D45WHONqn", "parentPublication": { "id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2018/6861/0/08802465", "title": "VAST Challenge 2018: Suspense at the Wildlife Preserve", "doi": null, "abstractUrl": "/proceedings-article/vast/2018/08802465/1cJ6Xt2YBRS", "parentPublication": { 
"id": "proceedings/vast/2018/6861/0", "title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2010/9488/0/05649054", "title": "VAST 2010 Challenge: Arms dealings and pandemics", "doi": null, "abstractUrl": "/proceedings-article/vast/2010/05649054/1iCAmnTV5Uk", "parentPublication": { "id": "proceedings/vast/2010/9488/0", "title": "2010 IEEE Symposium on Visual Analytics Science and Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzX6ceh", "title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)", "acronym": "icde", "groupId": "1000178", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNxeM45Z", "doi": "10.1109/ICDE.2016.7498285", "title": "MuVE: Efficient Multi-Objective View Recommendation for Visual Data Exploration", "normalizedTitle": "MuVE: Efficient Multi-Objective View Recommendation for Visual Data Exploration", "abstract": "To support effective data exploration, there is a well-recognized need for solutions that can automatically recommend interesting visualizations, which reveal useful insights into the analyzed data. However, such visualizations come at the expense of high data processing costs, where a large number of views are generated to evaluate their usefulness. Those costs are further escalated in the presence of numerical dimensional attributes, due to the potentially large number of possible binning aggregations, which lead to a drastic increase in the number of possible visualizations. To address that challenge, in this paper we propose the MuVE scheme for Multi-Objective View Recommendation for Visual Data Exploration. MuVE introduces a hybrid multi-objective utility function, which captures the impact of binning on the utility of visualizations. Consequently, novel algorithms are proposed for the efficient recommendation of data visualizations that are based on numerical dimensions. The main idea underlying MuVE is to incrementally and progressively assess the different benefits provided by a visualization, which allows an early pruning of a large number of unnecessary operations. 
Our extensive experimental results show the significant gains provided by our proposed scheme.", "abstracts": [ { "abstractType": "Regular", "content": "To support effective data exploration, there is a well-recognized need for solutions that can automatically recommend interesting visualizations, which reveal useful insights into the analyzed data. However, such visualizations come at the expense of high data processing costs, where a large number of views are generated to evaluate their usefulness. Those costs are further escalated in the presence of numerical dimensional attributes, due to the potentially large number of possible binning aggregations, which lead to a drastic increase in the number of possible visualizations. To address that challenge, in this paper we propose the MuVE scheme for Multi-Objective View Recommendation for Visual Data Exploration. MuVE introduces a hybrid multi-objective utility function, which captures the impact of binning on the utility of visualizations. Consequently, novel algorithms are proposed for the efficient recommendation of data visualizations that are based on numerical dimensions. The main idea underlying MuVE is to incrementally and progressively assess the different benefits provided by a visualization, which allows an early pruning of a large number of unnecessary operations. Our extensive experimental results show the significant gains provided by our proposed scheme.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To support effective data exploration, there is a well-recognized need for solutions that can automatically recommend interesting visualizations, which reveal useful insights into the analyzed data. However, such visualizations come at the expense of high data processing costs, where a large number of views are generated to evaluate their usefulness. 
Those costs are further escalated in the presence of numerical dimensional attributes, due to the potentially large number of possible binning aggregations, which lead to a drastic increase in the number of possible visualizations. To address that challenge, in this paper we propose the MuVE scheme for Multi-Objective View Recommendation for Visual Data Exploration. MuVE introduces a hybrid multi-objective utility function, which captures the impact of binning on the utility of visualizations. Consequently, novel algorithms are proposed for the efficient recommendation of data visualizations that are based on numerical dimensions. The main idea underlying MuVE is to incrementally and progressively assess the different benefits provided by a visualization, which allows an early pruning of a large number of unnecessary operations. Our extensive experimental results show the significant gains provided by our proposed scheme.", "fno": "07498285", "keywords": [ "Data Visualization", "Visualization", "Aggregates", "Databases", "Atmospheric Measurements", "Particle Measurements" ], "authors": [ { "affiliation": "School of Information Technology and Electrical Engineering, The University of Queensland, Australia", "fullName": "Humaira Ehsan", "givenName": "Humaira", "surname": "Ehsan", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Information Technology and Electrical Engineering, The University of Queensland, Australia", "fullName": "Mohamed A. Sharaf", "givenName": "Mohamed A.", "surname": "Sharaf", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Computer Science, University of Pittsburgh, Pennsylvania, USA", "fullName": "Panos K. 
Chrysanthis", "givenName": "Panos K.", "surname": "Chrysanthis", "__typename": "ArticleAuthorType" } ], "idPrefix": "icde", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-05-01T00:00:00", "pubType": "proceedings", "pages": "731-742", "year": "2016", "issn": null, "isbn": "978-1-5090-2020-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07498284", "articleId": "12OmNxecS2Q", "__typename": "AdjacentArticleType" }, "next": { "fno": "07498286", "articleId": "12OmNAlvHuT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/lt/2015/02/06979251", "title": "Measuring and Visualizing Students’ Behavioral Engagement in Writing Activities", "doi": null, "abstractUrl": "/journal/lt/2015/02/06979251/13rRUwfZC2q", "parentPublication": { "id": "trans/lt", "title": "IEEE Transactions on Learning Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192646", "title": "Beyond Memorability: Visualization Recognition and Recall", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192646/13rRUxASuME", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2018/02/08081825", "title": "Efficient Recommendation of Aggregate Data Visualizations", "doi": null, "abstractUrl": "/journal/tk/2018/02/08081825/13rRUxYrbV5", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a150", "title": "ASEVis: Visual Exploration of Active System Ensembles to Define Characteristic Measures", "doi": null, "abstractUrl": 
"/proceedings-article/vis/2022/881200a150/1J6h5DX0BAA", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807247", "title": "Improving the Robustness of Scagnostics", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807247/1cG67fsQY0g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/2019/6739/0/673900a110", "title": "Leveraging Data-Analysis Session Logs for Efficient, Personalized, Interactive View Recommendation", "doi": null, "abstractUrl": "/proceedings-article/cic/2019/673900a110/1hrMeFYVB0k", "parentPublication": { "id": "proceedings/cic/2019/6739/0", "title": "2019 IEEE 5th International Conference on Collaboration and Internet Computing (CIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a397", "title": "Evaluating Query Strategies for Different Feedback Types in Interactive View Recommendation", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a397/1rSR7WElAre", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2021/3892/0/389200a361", "title": "Personalized recommendation method of multimedia network assisted English teaching resources based on particle swarm optimization", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2021/389200a361/1t2ni8uclK8", "parentPublication": { "id": "proceedings/icmtma/2021/3892/0", "title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a161", "title": "Understanding the Effects of Visualizing Missing Values on Visual Data Exploration", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a161/1yXu9WURx2E", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2021/1770/0/177000a130", "title": "Newsalyze: Effective Communication of Person-Targeting Biases in News Articles", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2021/177000a130/1zJmV9QWbGo", "parentPublication": { "id": "proceedings/jcdl/2021/1770/0", "title": "2021 ACM/IEEE Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1EzI2prW2E8", "title": "2022 IEEE Workshop on Design Automation for CPS and IoT (DESTION)", "acronym": "destion", "groupId": "1836784", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1EzI3b1JwmA", "doi": "10.1109/DESTION56136.2022.00017", "title": "Comparing Strategies for Visualizing the High-Dimensional Exploration Behavior of CPS Design Agents", "normalizedTitle": "Comparing Strategies for Visualizing the High-Dimensional Exploration Behavior of CPS Design Agents", "abstract": "The design of cyber-physical systems often involves search within high-dimensional design spaces. When evaluating the performance of algorithms in tasks such as these, the patterns of exploration are often informative and can help support algorithm selection. However, accurately representing these patterns in a way that is human understandable while still preserving the nuanced search complexities in the high-dimensional space is nontrivial. This work specifically examines approaches for visualizing the search trajectories of reinforcement learning agents. We assess trajectories on two exemplar problems: the design of a racecar and the design of an aerial vehicle. We compare and contrast the visualizations produced using PCA, t-SNE, UMAP, TriMap, and PaCMAP. Future work should extend this comparison to a wider variety of exemplar design problems and consider the additional challenges posed by set-based design algorithms (e.g., genetic algorithms, particle swarm optimization).", "abstracts": [ { "abstractType": "Regular", "content": "The design of cyber-physical systems often involves search within high-dimensional design spaces. When evaluating the performance of algorithms in tasks such as these, the patterns of exploration are often informative and can help support algorithm selection. 
However, accurately representing these patterns in a way that is human understandable while still preserving the nuanced search complexities in the high-dimensional space is nontrivial. This work specifically examines approaches for visualizing the search trajectories of reinforcement learning agents. We assess trajectories on two exemplar problems: the design of a racecar and the design of an aerial vehicle. We compare and contrast the visualizations produced using PCA, t-SNE, UMAP, TriMap, and PaCMAP. Future work should extend this comparison to a wider variety of exemplar design problems and consider the additional challenges posed by set-based design algorithms (e.g., genetic algorithms, particle swarm optimization).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The design of cyber-physical systems often involves search within high-dimensional design spaces. When evaluating the performance of algorithms in tasks such as these, the patterns of exploration are often informative and can help support algorithm selection. However, accurately representing these patterns in a way that is human understandable while still preserving the nuanced search complexities in the high-dimensional space is nontrivial. This work specifically examines approaches for visualizing the search trajectories of reinforcement learning agents. We assess trajectories on two exemplar problems: the design of a racecar and the design of an aerial vehicle. We compare and contrast the visualizations produced using PCA, t-SNE, UMAP, TriMap, and PaCMAP. 
Future work should extend this comparison to a wider variety of exemplar design problems and consider the additional challenges posed by set-based design algorithms (e.g., genetic algorithms, particle swarm optimization).", "fno": "704000a064", "keywords": [ "Genetic Algorithms", "Learning Artificial Intelligence", "Particle Swarm Optimisation", "Search Problems", "Search Trajectories", "Reinforcement Learning Agents", "Exemplar Problems", "Exemplar Design Problems", "Set Based Design Algorithms", "Genetic Algorithms", "High Dimensional Exploration Behavior", "CPS Design Agents", "Cyber Physical Systems", "High Dimensional Design Spaces", "Algorithm Selection", "Nuanced Search Complexities", "High Dimensional Space", "Dimensionality Reduction", "Visualization", "Design Automation", "Reinforcement Learning", "Cyber Physical Systems", "Trajectory", "Task Analysis", "Reinforcement Learning", "Visualization", "Dimensionality Reduction", "Design", "Cyber Physical Systems" ], "authors": [ { "affiliation": "Carnegie Mellon University,Department of Mechanical Engineering,Pittsburgh,PA,USA", "fullName": "Akash Agrawal", "givenName": "Akash", "surname": "Agrawal", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University,Department of Mechanical Engineering,Pittsburgh,PA,USA", "fullName": "Christopher McComb", "givenName": "Christopher", "surname": "McComb", "__typename": "ArticleAuthorType" } ], "idPrefix": "destion", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-05-01T00:00:00", "pubType": "proceedings", "pages": "64-69", "year": "2022", "issn": null, "isbn": "978-1-6654-7040-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "704000a057", "articleId": "1EzI316CLuM", "__typename": "AdjacentArticleType" }, "next": { "fno": "704000a071", "articleId": "1EzI4dUuJKU", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sescps/2015/7088/0/7088a001", "title": "Modeling Challenges for CPS Systems", "doi": null, "abstractUrl": "/proceedings-article/sescps/2015/7088a001/12OmNwF0C11", "parentPublication": { "id": "proceedings/sescps/2015/7088/0", "title": "2015 IEEE/ACM 1st International Workshop on Software Engineering for Smart Cyber-Physical Systems (SEsCPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pdp/2016/8776/0/8776a609", "title": "Application of a Technique for Secure Embedded Device Design Based on Combining Security Components for Creation of a Perimeter Protection System", "doi": null, "abstractUrl": "/proceedings-article/pdp/2016/8776a609/12OmNwvDQql", "parentPublication": { "id": "proceedings/pdp/2016/8776/0", "title": "2016 24th Euromicro International Conference on Parallel, Distributed, and Network-Based Processing (PDP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/10/ttg2011101487", "title": "A User-Assisted Approach to Visualizing Multidimensional Images", "doi": null, "abstractUrl": "/journal/tg/2011/10/ttg2011101487/13rRUwhpBO2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2021/1762/0/176200a346", "title": "Automatic Generation of Workflows for Efficient Design Space Exploration for Cyber-Physical Systems", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2021/176200a346/1AIMzbqlIwU", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2021/1762/0", "title": "2021 IEEE International Conferences on Internet of Things (iThings) and IEEE Green Computing & Communications (GreenCom) 
and IEEE Cyber, Physical & Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/qsw/2022/8134/0/813400a038", "title": "Towards quantum-based Search for industrial Data-driven Services", "doi": null, "abstractUrl": "/proceedings-article/qsw/2022/813400a038/1FWmT0krliE", "parentPublication": { "id": "proceedings/qsw/2022/8134/0", "title": "2022 IEEE International Conference on Quantum Software (QSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09930144", "title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings", "doi": null, "abstractUrl": "/journal/tg/5555/01/09930144/1HMOX2J2VMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsd/2022/7404/0/740400a632", "title": "Design Space Exploration for Distributed Cyber-Physical Systems: State-of-the-art, Challenges, and Directions", "doi": null, "abstractUrl": "/proceedings-article/dsd/2022/740400a632/1JF8hFrs4kE", "parentPublication": { "id": "proceedings/dsd/2022/7404/0", "title": "2022 25th Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issre/2022/5132/0/513200a145", "title": "Search-based Testing for Accurate Fault Localization in CPS", "doi": null, "abstractUrl": "/proceedings-article/issre/2022/513200a145/1JhTGgBmZyM", "parentPublication": { "id": "proceedings/issre/2022/5132/0", "title": "2022 IEEE 33rd International Symposium on Software Reliability Engineering (ISSRE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ucc/2022/6087/0/608700a424", "title": "Trusted Virtual Network Embedding in Blockchain-Based Smart Cyber-Physical Systems", "doi": null, "abstractUrl": "/proceedings-article/ucc/2022/608700a424/1LvAcfX8ogM", "parentPublication": { "id": "proceedings/ucc/2022/6087/0", "title": "2022 IEEE/ACM 15th International Conference on Utility and Cloud Computing (UCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ai/2020/03/09351708", "title": "Fast Real-Time Reinforcement Learning for Partially-Observable Large-Scale Systems", "doi": null, "abstractUrl": "/journal/ai/2020/03/09351708/1r518a26TAY", "parentPublication": { "id": "trans/ai", "title": "IEEE Transactions on Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNro0Ib9", "title": "Volume Graphics 2005", "acronym": "vg", "groupId": "1002149", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNwDACgs", "doi": "10.1109/VG.2005.194092", "title": "A multiresolution volume rendering framework for large-scale time-varying data visualization", "normalizedTitle": "A multiresolution volume rendering framework for large-scale time-varying data visualization", "abstract": "We present a new parallel multiresolution volume rendering framework for large-scale time-varying data visualization using the wavelet-based time-space partitioning (WTSP) tree. Utilizing the wavelet transform, a large-scale time-varying data set is converted into a space-time multiresolution data hierarchy, and is stored in a time-space partitioning (TSP) tree. To eliminate the parent-child data dependency for reconstruction and achieve load-balanced rendering, we design an algorithm to partition the WTSP tree and distribute the wavelet-compressed data along hierarchical space-filling curves with error-guided bucketization. At run time, the WTSP tree is traversed according to the user-specified time step and tolerances of both spatial and temporal errors. Data blocks of different spatio-temporal resolutions are reconstructed and rendered to compose the final image in parallel. We demonstrate that our algorithm can reduce the run-time communication cost to a minimum and ensure a well-balanced workload among processors when visualizing gigabytes of time-varying data on a PC cluster.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new parallel multiresolution volume rendering framework for large-scale time-varying data visualization using the wavelet-based time-space partitioning (WTSP) tree. 
Utilizing the wavelet transform, a large-scale time-varying data set is converted into a space-time multiresolution data hierarchy, and is stored in a time-space partitioning (TSP) tree. To eliminate the parent-child data dependency for reconstruction and achieve load-balanced rendering, we design an algorithm to partition the WTSP tree and distribute the wavelet-compressed data along hierarchical space-filling curves with error-guided bucketization. At run time, the WTSP tree is traversed according to the user-specified time step and tolerances of both spatial and temporal errors. Data blocks of different spatio-temporal resolutions are reconstructed and rendered to compose the final image in parallel. We demonstrate that our algorithm can reduce the run-time communication cost to a minimum and ensure a well-balanced workload among processors when visualizing gigabytes of time-varying data on a PC cluster.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new parallel multiresolution volume rendering framework for large-scale time-varying data visualization using the wavelet-based time-space partitioning (WTSP) tree. Utilizing the wavelet transform, a large-scale time-varying data set is converted into a space-time multiresolution data hierarchy, and is stored in a time-space partitioning (TSP) tree. To eliminate the parent-child data dependency for reconstruction and achieve load-balanced rendering, we design an algorithm to partition the WTSP tree and distribute the wavelet-compressed data along hierarchical space-filling curves with error-guided bucketization. At run time, the WTSP tree is traversed according to the user-specified time step and tolerances of both spatial and temporal errors. Data blocks of different spatio-temporal resolutions are reconstructed and rendered to compose the final image in parallel. 
We demonstrate that our algorithm can reduce the run-time communication cost to a minimum and ensure a well-balanced workload among processors when visualizing gigabytes of time-varying data on a PC cluster.", "fno": "01500519", "keywords": [ "Rendering Computer Graphics", "Data Visualisation", "Wavelet Transforms", "Tree Data Structures", "Image Resolution", "Data Compression", "Resource Allocation", "Workstation Clusters", "Parallel Processing", "Large Scale Time Varying Data Visualization", "Parallel Multiresolution Volume Rendering", "Wavelet Based Time Space Partitioning Tree", "Wavelet Transform", "Large Scale Time Varying Data Set", "Space Time Multiresolution Data Hierarchy", "Parent Child Data Dependency", "Load Balanced Rendering", "Wavelet Compressed Data", "Hierarchical Space Filling Curves", "Error Guided Bucketization", "Spatio Temporal Resolutions", "Run Time Communication Cost", "PC Cluster", "Large Scale Systems", "Data Visualization", "Spatial Resolution", "Image Reconstruction", "Wavelet Transforms", "Algorithm Design And Analysis", "Partitioning Algorithms", "Image Resolution", "Rendering Computer Graphics", "Clustering Algorithms" ], "authors": [ { "affiliation": "The Ohio State Univ., Columbus, OH, USA", "fullName": "Chaoli Wang", "givenName": null, "surname": "Chaoli Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jinzhu Gao", "givenName": null, "surname": "Jinzhu Gao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Liya Li", "givenName": null, "surname": "Liya Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Han-Wei Shen", "givenName": null, "surname": "Han-Wei Shen", "__typename": "ArticleAuthorType" } ], "idPrefix": "vg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-03-01T00:00:00", "pubType": "proceedings", "pages": 
"11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223", "year": "2005", "issn": "1727-8376", "isbn": "3-905673-26-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01500517", "articleId": "12OmNzBwGp9", "__typename": "AdjacentArticleType" }, "next": { "fno": "01500520", "articleId": "12OmNx9FhRn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/visual/1992/2897/0/00235230", "title": "Approximation and rendering of volume data using wavelet transforms", "doi": null, "abstractUrl": "/proceedings-article/visual/1992/00235230/12OmNAmVH4g", "parentPublication": { "id": "proceedings/visual/1992/2897/0", "title": "Proceedings Visualization '92", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2008/1966/0/04475469", "title": "Multi-resolution Volume Rendering of Large Time-Varying Data using Video-based Compression", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2008/04475469/12OmNC8Msq0", "parentPublication": { "id": "proceedings/pacificvis/2008/1966/0", "title": "IEEE Pacific Visualization Symposium 2008", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/1997/8076/0/80760330", "title": "A new algorithm for multiresolution isosurface extraction", "doi": null, "abstractUrl": "/proceedings-article/iv/1997/80760330/12OmNCcbEjd", "parentPublication": { "id": "proceedings/iv/1997/8076/0", "title": "Proceedings. 1997 IEEE Conference on Information Visualization (Cat. No.97TB100165)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/1/00413353", "title": "Multiresolution detection of coherent radar targets", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413353/12OmNCd2rVG", "parentPublication": { "id": "proceedings/icip/1994/6952/3", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/2/00413687", "title": "Multiresolution tomographic reconstruction using wavelets", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413687/12OmNyRxFIT", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/2/00413646", "title": "Unsupervised multiresolution texture segmentation using wavelet decomposition", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413646/12OmNzaQobJ", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2014/4274/0/4274a990", "title": "A Novel Time-Frequency Analysis Approach for Nonstationary Time Series Using Multiresolution Wavelet", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2014/4274a990/12OmNzkMlGu", 
"parentPublication": { "id": "proceedings/icdmw/2014/4274/0", "title": "2014 IEEE International Conference on Data Mining Workshop (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1029", "title": "LOD Map - A Visual Interface for Navigating Multiresolution Volume Visualization", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1029/13rRUxcKzVe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/00809908", "title": "Multiresolution techniques for interactive texture-based volume visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/00809908/1h0KNaivmHm", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dagstuhl/1997/0503/0/01423128", "title": "Performance Evaluation of Multiresolution Isosurface Rendering", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/01423128/1h0N4u2RHWg", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCfAPCc", "title": "2012 16th International Conference on Information Visualisation", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNxE2mWP", "doi": "10.1109/IV.2012.15", "title": "Implementation and Evaluation of an Enhanced H-tree Layout Pedigree Visualization", "normalizedTitle": "Implementation and Evaluation of an Enhanced H-tree Layout Pedigree Visualization", "abstract": "The constant growth of available genealogical information has encouraged the research of visualization techniques capable of representing the corresponding large amount of data. An H-Tree Layout has been recently proposed to represent pedigree data as a way to overcome some of the limitations of traditional representations. However, this new method has its own limitations which may hinder its adoption. In this paper, we propose some enhancements to the H-Tree Layout pedigree visualization method in order to overcome some of the identified limitations. An implementation of the proposed enhancements and results of a preliminary evaluation are also provided.", "abstracts": [ { "abstractType": "Regular", "content": "The constant growth of available genealogical information has encouraged the research of visualization techniques capable of representing the corresponding large amount of data. An H-Tree Layout has been recently proposed to represent pedigree data as a way to overcome some of the limitations of traditional representations. However, this new method has its own limitations which may hinder its adoption. In this paper, we propose some enhancements to the H-Tree Layout pedigree visualization method in order to overcome some of the identified limitations. 
An implementation of the proposed enhancements and results of a preliminary evaluation are also provided.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The constant growth of available genealogical information has encouraged the research of visualization techniques capable of representing the corresponding large amount of data. An H-Tree Layout has been recently proposed to represent pedigree data as a way to overcome some of the limitations of traditional representations. However, this new method has its own limitations which may hinder its adoption. In this paper, we propose some enhancements to the H-Tree Layout pedigree visualization method in order to overcome some of the identified limitations. An implementation of the proposed enhancements and results of a preliminary evaluation are also provided.", "fno": "4771a024", "keywords": [ "Information Visualization", "Genealogy", "Pedigree", "H Tree Layout" ], "authors": [ { "affiliation": null, "fullName": "Joao Miguel Santos", "givenName": "Joao Miguel", "surname": "Santos", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Paulo Dias", "givenName": "Paulo", "surname": "Dias", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Beatriz Sousa Santos", "givenName": "Beatriz Sousa", "surname": "Santos", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-07-01T00:00:00", "pubType": "proceedings", "pages": "24-29", "year": "2012", "issn": "1550-6037", "isbn": "978-1-4673-2260-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4771a018", "articleId": "12OmNrEL2zK", "__typename": "AdjacentArticleType" }, "next": { "fno": "4771a030", "articleId": "12OmNvvLi3Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { 
"id": "proceedings/cis/2011/4584/0/4584a119", "title": "Phylogenetic Tree of DNA Sequences Constructed by a New Method-H Curve", "doi": null, "abstractUrl": "/proceedings-article/cis/2011/4584a119/12OmNBO3Kjk", "parentPublication": { "id": "proceedings/cis/2011/4584/0", "title": "2011 Seventh International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispan/2008/3125/0/3125a281", "title": "Three-Dimensional Layout of On-Chip Tree-Based Networks", "doi": null, "abstractUrl": "/proceedings-article/ispan/2008/3125a281/12OmNqGRGhZ", "parentPublication": { "id": "proceedings/ispan/2008/3125/0", "title": "Parallel Architectures, Algorithms, and Networks, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/glsv/1991/2170/0/00143936", "title": "Area efficient binary tree layout", "doi": null, "abstractUrl": "/proceedings-article/glsv/1991/00143936/12OmNxzMnW8", "parentPublication": { "id": "proceedings/glsv/1991/2170/0", "title": "Proceedings First Great Lakes Symposium on VLSI", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2013/5049/0/5049a422", "title": "Extending the H-Tree Layout Pedigree: An Evaluation", "doi": null, "abstractUrl": "/proceedings-article/iv/2013/5049a422/12OmNzdGnwO", "parentPublication": { "id": "proceedings/iv/2013/5049/0", "title": "2013 17th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/03/ttg2011030290", "title": "Hi-Trees and Their Layout", "doi": null, "abstractUrl": "/journal/tg/2011/03/ttg2011030290/13rRUwInvsL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "trans/tg/2008/06/ttg2008061301", "title": "Rapid Graph Layout Using Space Filling Curves", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061301/13rRUx0xPIx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/1989/04/t0526", "title": "On Implementing Large Binary Tree Architectures in VLSI and WSI", "doi": null, "abstractUrl": "/journal/tc/1989/04/t0526/13rRUxcbnBr", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061063", "title": "PedVis: A Structured, Space-Efficient Technique for Pedigree Visualization", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061063/13rRUy0qnGg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a659", "title": "Immersive Pedigree Graph Visualisations", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a659/1tnY3QRjZJK", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNASraww", "title": "2009 IEEE Pacific Visualization Symposium", "acronym": "pacificvis", "groupId": "1001657", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNy4r3Zf", "doi": "10.1109/PACIFICVIS.2009.4906841", "title": "Point-based tree representation: A new approach for large hierarchies", "normalizedTitle": "Point-based tree representation: A new approach for large hierarchies", "abstract": "Space-filling layout techniques for tree representations are frequently used when the available screen space is small or the data set is large. In this paper, we propose a new approach to space-filling tree representations, which uses mechanisms from the point-based rendering paradigm. Additionally, helpful interaction techniques that tie in with our layout are presented. We will relate our new technique to established space-filling techniques along the lines of a newly developed classification and also evaluate it numerically using the measures of the Ink-Paper-Ratio and overplotted%.", "abstracts": [ { "abstractType": "Regular", "content": "Space-filling layout techniques for tree representations are frequently used when the available screen space is small or the data set is large. In this paper, we propose a new approach to space-filling tree representations, which uses mechanisms from the point-based rendering paradigm. Additionally, helpful interaction techniques that tie in with our layout are presented. We will relate our new technique to established space-filling techniques along the lines of a newly developed classification and also evaluate it numerically using the measures of the Ink-Paper-Ratio and overplotted%.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Space-filling layout techniques for tree representations are frequently used when the available screen space is small or the data set is large. 
In this paper, we propose a new approach to space-filling tree representations, which uses mechanisms from the point-based rendering paradigm. Additionally, helpful interaction techniques that tie in with our layout are presented. We will relate our new technique to established space-filling techniques along the lines of a newly developed classification and also evaluate it numerically using the measures of the Ink-Paper-Ratio and overplotted%.", "fno": "04906841", "keywords": [], "authors": [ { "affiliation": "University of Rostock, Germany", "fullName": "Hans-Jorg Schulz", "givenName": "Hans-Jorg", "surname": "Schulz", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rostock, Germany", "fullName": "Steffen Hadlak", "givenName": "Steffen", "surname": "Hadlak", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Rostock, Germany", "fullName": "Heidrun Schumann", "givenName": "Heidrun", "surname": "Schumann", "__typename": "ArticleAuthorType" } ], "idPrefix": "pacificvis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-04-01T00:00:00", "pubType": "proceedings", "pages": "81-88", "year": "2009", "issn": null, "isbn": "978-1-4244-4404-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04906858", "articleId": "12OmNwcCIQd", "__typename": "AdjacentArticleType" }, "next": { "fno": "04906842", "articleId": "12OmNy87QAS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dac/2000/2428/0/24280464", "title": "Block Placement with Symmetry Constraints Based on the O-Tree Non-Slicing Representation", "doi": null, "abstractUrl": "/proceedings-article/dac/2000/24280464/12OmNAtK4fp", "parentPublication": { "id": "proceedings/dac/2000/2428/0", "title": "Design Automation Conference", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cgiv/2007/2928/0/29280429", "title": "Three-Dimensional EncCon Tree", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2007/29280429/12OmNBkP3Ed", "parentPublication": { "id": "proceedings/cgiv/2007/2928/0", "title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2005/2790/0/27900024", "title": "A Note on Space-Filling Visualizations and Space-Filling Curves", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2005/27900024/12OmNCd2rMj", "parentPublication": { "id": "proceedings/ieee-infovis/2005/2790/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ivapp/2014/8132/0/07294397", "title": "Visualization of varying hierarchies by stable layout of voronoi treemaps", "doi": null, "abstractUrl": "/proceedings-article/ivapp/2014/07294397/12OmNqIQS8i", "parentPublication": { "id": "proceedings/ivapp/2014/8132/0", "title": "2014 International Conference on Information Visualization Theory and Applications (IVAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/infvis/2005/9464/0/01532145", "title": "A note on space-filling visualizations and space-filling curves", "doi": null, "abstractUrl": "/proceedings-article/infvis/2005/01532145/12OmNwMob7V", "parentPublication": { "id": "proceedings/infvis/2005/9464/0", "title": "IEEE Symposium on Information Visualization (InfoVis 05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2013/4797/0/06596149", "title": "A generative layout approach for rooted tree drawings", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2013/06596149/12OmNxXCGLc", "parentPublication": { "id": 
"proceedings/pacificvis/2013/4797/0", "title": "2013 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2005/2790/0/01532145", "title": "A note on space-filling visualizations and space-filling curves", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2005/01532145/12OmNy6ZrZZ", "parentPublication": { "id": "proceedings/ieee-infovis/2005/2790/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363377", "title": "The Mobile Tree Browser: A Space Filling Information Visualization for Browsing Labelled Hierarchies on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363377/12OmNzaQoa1", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2013/5049/0/5049a422", "title": "Extending the H-Tree Layout Pedigree: An Evaluation", "doi": null, "abstractUrl": "/proceedings-article/iv/2013/5049a422/12OmNzdGnwO", "parentPublication": { "id": "proceedings/iv/2013/5049/0", "title": "2013 17th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050598", "title": "Point-Based Visualization for Large Hierarchies", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050598/13rRUxASupx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz5JC0w", "title": "Field-Programmable Custom Computing Machines, Annual IEEE Symposium on", "acronym": "fccm", "groupId": "1000294", "volume": "0", "displayVolume": "0", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNBiPRCp", "doi": "10.1109/FPGA.1995.477417", "title": "Acceleration of template-based ray casting for volume visualization using FPGAs", "normalizedTitle": "Acceleration of template-based ray casting for volume visualization using FPGAs", "abstract": "Abstract: Volume visualization is used heavily to view simulated or collected data sets in such applications as medical imaging, computational fluid dynamics, and climate modeling. However, software and low-cost hardware implementations of visualization algorithms do not have sufficient performance for interactive viewing. This paper discusses methods for low-cost, hardware acceleration of volume visualization using a PC-hosted FPGA board. Our methods focus on volume rendering approaches, since these techniques are widely used and are computationally expensive; our primary method uses a template-based, ray-casting algorithm. This hardware implementation is substantially faster than a software-only version running on the host PC.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract: Volume visualization is used heavily to view simulated or collected data sets in such applications as medical imaging, computational fluid dynamics, and climate modeling. However, software and low-cost hardware implementations of visualization algorithms do not have sufficient performance for interactive viewing. This paper discusses methods for low-cost, hardware acceleration of volume visualization using a PC-hosted FPGA board. Our methods focus on volume rendering approaches, since these techniques are widely used and are computationally expensive; our primary method uses a template-based, ray-casting algorithm. 
This hardware implementation is substantially faster than a software-only version running on the host PC.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract: Volume visualization is used heavily to view simulated or collected data sets in such applications as medical imaging, computational fluid dynamics, and climate modeling. However, software and low-cost hardware implementations of visualization algorithms do not have sufficient performance for interactive viewing. This paper discusses methods for low-cost, hardware acceleration of volume visualization using a PC-hosted FPGA board. Our methods focus on volume rendering approaches, since these techniques are widely used and are computationally expensive; our primary method uses a template-based, ray-casting algorithm. This hardware implementation is substantially faster than a software-only version running on the host PC.", "fno": "70860116", "keywords": [ "Field Programmable Gate Arrays Logic Arrays Data Visualisation Rendering Computer Graphics Template Based Ray Casting Acceleration Volume Visualization FPG As Simulated Data Medical Imaging Computational Fluid Dynamics Climate Modeling Interactive Viewing Volume Rendering" ], "authors": [ { "affiliation": "Dept. of Electr. & Comput. Eng., Rutgers Univ., Piscataway, NJ, USA", "fullName": "M. Dao", "givenName": "M.", "surname": "Dao", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electr. & Comput. Eng., Rutgers Univ., Piscataway, NJ, USA", "fullName": "T.A. Cook", "givenName": "T.A.", "surname": "Cook", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electr. & Comput. Eng., Rutgers Univ., Piscataway, NJ, USA", "fullName": "D. Silver", "givenName": "D.", "surname": "Silver", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electr. & Comput. Eng., Rutgers Univ., Piscataway, NJ, USA", "fullName": "P.S. 
D'Urbano", "givenName": "P.S.", "surname": "D'Urbano", "__typename": "ArticleAuthorType" } ], "idPrefix": "fccm", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-04-01T00:00:00", "pubType": "proceedings", "pages": "0116", "year": "1995", "issn": null, "isbn": "0-8186-7086-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "70860110", "articleId": "12OmNCcbE1q", "__typename": "AdjacentArticleType" }, "next": { "fno": "70860125", "articleId": "12OmNCmpcIl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAolGJI", "title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)", "acronym": "ldav", "groupId": "1800568", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNxWcH5i", "doi": "10.1109/LDAV.2014.7013200", "title": "Cache-aware sampling strategies for texture-based ray casting on GPU", "normalizedTitle": "Cache-aware sampling strategies for texture-based ray casting on GPU", "abstract": "As a major component of volume rendering, the ray casting algorithm is memory-intensive. However, most existing texture-based volume rendering methods blindly map computational resources to texture memory and result in an incoherent access pattern, causing low cache hit rates in certain cases. The distance between samples taken by threads of the same scheduling unit (e.g. a warp of 32 threads in CUDA), of the GPU is a major factor that affects the texture cache hit rate. Based on this fact, we present a new sampling strategy, i.e. warp marching, which displays a novel computation-to-core mapping. In addition, a double buffer approach is introduced and special GPU operations are leveraged to improve the efficiency of parallel executions. To keep a roughly constant rendering performance when rotating the volume, we change our warp marching algorithm, so that samples can be taken along different directions of the volume. As a result, varying texture cache hit rates in different viewing directions are averaged out. Through a series of micro-benchmarking and real-life data experiments, we rigorously analyze our sampling strategies, and demonstrate significant performance enhancements over existing sampling methods.", "abstracts": [ { "abstractType": "Regular", "content": "As a major component of volume rendering, the ray casting algorithm is memory-intensive. 
However, most existing texture-based volume rendering methods blindly map computational resources to texture memory and result in an incoherent access pattern, causing low cache hit rates in certain cases. The distance between samples taken by threads of the same scheduling unit (e.g. a warp of 32 threads in CUDA), of the GPU is a major factor that affects the texture cache hit rate. Based on this fact, we present a new sampling strategy, i.e. warp marching, which displays a novel computation-to-core mapping. In addition, a double buffer approach is introduced and special GPU operations are leveraged to improve the efficiency of parallel executions. To keep a roughly constant rendering performance when rotating the volume, we change our warp marching algorithm, so that samples can be taken along different directions of the volume. As a result, varying texture cache hit rates in different viewing directions are averaged out. Through a series of micro-benchmarking and real-life data experiments, we rigorously analyze our sampling strategies, and demonstrate significant performance enhancements over existing sampling methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As a major component of volume rendering, the ray casting algorithm is memory-intensive. However, most existing texture-based volume rendering methods blindly map computational resources to texture memory and result in an incoherent access pattern, causing low cache hit rates in certain cases. The distance between samples taken by threads of the same scheduling unit (e.g. a warp of 32 threads in CUDA), of the GPU is a major factor that affects the texture cache hit rate. Based on this fact, we present a new sampling strategy, i.e. warp marching, which displays a novel computation-to-core mapping. In addition, a double buffer approach is introduced and special GPU operations are leveraged to improve the efficiency of parallel executions. 
To keep a roughly constant rendering performance when rotating the volume, we change our warp marching algorithm, so that samples can be taken along different directions of the volume. As a result, varying texture cache hit rates in different viewing directions are averaged out. Through a series of micro-benchmarking and real-life data experiments, we rigorously analyze our sampling strategies, and demonstrate significant performance enhancements over existing sampling methods.", "fno": "07013200", "keywords": [ "Instruction Sets", "Graphics Processing Units", "Rendering Computer Graphics", "Benchmark Testing", "Casting", "Algorithm Design And Analysis", "Message Systems" ], "authors": [ { "affiliation": "Virginia Tech, USA", "fullName": "Junpeng Wang", "givenName": null, "surname": "Junpeng Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences, China", "fullName": "Fei Yang", "givenName": null, "surname": "Fei Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech, USA", "fullName": "Yong Cao", "givenName": null, "surname": "Yong Cao", "__typename": "ArticleAuthorType" } ], "idPrefix": "ldav", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-11-01T00:00:00", "pubType": "proceedings", "pages": "19-26", "year": "2014", "issn": null, "isbn": "978-1-4799-5215-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07013199", "articleId": "12OmNzwHvqS", "__typename": "AdjacentArticleType" }, "next": { "fno": "07013201", "articleId": "12OmNqzu6NX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iscsct/2008/3498/2/3498b783", "title": "An Octree Ray Casting Algorithm Based on Multi-core CPUs", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498b783/12OmNAgGwg5", "parentPublication": { "id": 
"proceedings/iscsct/2008/3498/1", "title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mvhi/2010/4009/0/4009a468", "title": "An Accelerative Ray Casting Algorithm Based on Crossing-Area Technique", "doi": null, "abstractUrl": "/proceedings-article/mvhi/2010/4009a468/12OmNArbG2a", "parentPublication": { "id": "proceedings/mvhi/2010/4009/0", "title": "Machine Vision and Human-machine Interface, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2006/0693/0/04061557", "title": "Ray Casting of Trimmed NURBS Surfaces on the GPU", "doi": null, "abstractUrl": "/proceedings-article/rt/2006/04061557/12OmNBNM8TN", "parentPublication": { "id": "proceedings/rt/2006/0693/0", "title": "IEEE Symposium on Interactive Ray Tracing 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/1998/9180/0/91800055", "title": "Adaptive Perspective Ray Casting", "doi": null, "abstractUrl": "/proceedings-article/vv/1998/91800055/12OmNBRsVxg", "parentPublication": { "id": "proceedings/vv/1998/9180/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200westermann", "title": "Accelerated Volume Ray-Casting using Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200westermann/12OmNCbU30D", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156372", "title": "Computation-to-core mapping strategies for iso-surface volume rendering on GPUs", "doi": null, "abstractUrl": 
"/proceedings-article/pacificvis/2015/07156372/12OmNwkzulc", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esiat/2009/3682/2/3682b575", "title": "Rapid Texture-based Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/esiat/2009/3682b575/12OmNx7G5VW", "parentPublication": { "id": "esiat/2009/3682/2", "title": "Environmental Science and Information Application Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300044", "title": "Hardware-Based Ray Casting for Tetrahedral Meshes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300044/12OmNzXnNw2", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061571", "title": "Volume Ray Casting with Peak Finding and Differential Sampling", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061571/13rRUxBa55W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgiv/2022/9250/0/925000a183", "title": "Ellipsoidal ray casting algorithm", "doi": null, "abstractUrl": "/proceedings-article/iccgiv/2022/925000a183/1LxfqGjszTi", "parentPublication": { "id": "proceedings/iccgiv/2022/9250/0", "title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBSBkfy", "title": "1991 Proceeding Visualization", "acronym": "visual", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1991", "__typename": "ProceedingType" }, "article": { "id": "12OmNyLiuv5", "doi": "10.1109/VISUAL.1991.175806", "title": "A fast ray tracing casting algorithm using adaptive isotriangular subdivision", "normalizedTitle": "A fast ray tracing casting algorithm using adaptive isotriangular subdivision", "abstract": "The use of ray casting in volume rendering and its uses and advantages over surface rendering algorithms are discussed. Various adaptive algorithms that attempt to overcome its problem of high computational cost by taking advantage of image coherency and the bandlimited nature of volume data are described. A method of subdividing the image plane with isosceles triangles, instead of quadrants as is usually done is proposed. It results in fewer rays being fired without sacrificing image quality. A brief theoretical analysis of the algorithm in comparison with other methods is given.<>", "abstracts": [ { "abstractType": "Regular", "content": "The use of ray casting in volume rendering and its uses and advantages over surface rendering algorithms are discussed. Various adaptive algorithms that attempt to overcome its problem of high computational cost by taking advantage of image coherency and the bandlimited nature of volume data are described. A method of subdividing the image plane with isosceles triangles, instead of quadrants as is usually done is proposed. It results in fewer rays being fired without sacrificing image quality. A brief theoretical analysis of the algorithm in comparison with other methods is given.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The use of ray casting in volume rendering and its uses and advantages over surface rendering algorithms are discussed. 
Various adaptive algorithms that attempt to overcome its problem of high computational cost by taking advantage of image coherency and the bandlimited nature of volume data are described. A method of subdividing the image plane with isosceles triangles, instead of quadrants as is usually done is proposed. It results in fewer rays being fired without sacrificing image quality. A brief theoretical analysis of the algorithm in comparison with other methods is given.", "fno": "00175806", "keywords": [ "Computational Geometry", "Computer Graphics", "Geometrical Optics", "Fast Ray Tracing Casting Algorithm", "Adaptive Isotriangular Subdivision", "Volume Rendering", "Surface Rendering Algorithms", "Image Coherency", "Image Quality", "Ray Tracing", "Casting", "Rendering Computer Graphics", "Data Visualization", "Partitioning Algorithms", "Data Mining", "Image Quality", "Algorithm Design And Analysis", "Computational Modeling", "Image Generation" ], "authors": [ { "affiliation": "Inst. of Syst. Sci., Nat. Univ. of Singapore, Kent Ridge, Singapore", "fullName": "R. Shu", "givenName": "R.", "surname": "Shu", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. of Syst. Sci., Nat. Univ. of Singapore, Kent Ridge, Singapore", "fullName": "A. 
Liu", "givenName": "A.", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "visual", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1991-01-01T00:00:00", "pubType": "proceedings", "pages": "232-238, 426", "year": "1991", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00175805", "articleId": "12OmNwErpQj", "__typename": "AdjacentArticleType" }, "next": { "fno": "00175807", "articleId": "12OmNwdL7jq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/mvhi/2010/4009/0/4009a468", "title": "An Accelerative Ray Casting Algorithm Based on Crossing-Area Technique", "doi": null, "abstractUrl": "/proceedings-article/mvhi/2010/4009a468/12OmNArbG2a", "parentPublication": { "id": "proceedings/mvhi/2010/4009/0", "title": "Machine Vision and Human-machine Interface, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/1998/9180/0/91800055", "title": "Adaptive Perspective Ray Casting", "doi": null, "abstractUrl": "/proceedings-article/vv/1998/91800055/12OmNBRsVxg", "parentPublication": { "id": "proceedings/vv/1998/9180/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipps/1995/7074/0/70740707", "title": "An optimal parallel algorithm for volume ray casting", "doi": null, "abstractUrl": "/proceedings-article/ipps/1995/70740707/12OmNxd4tyh", "parentPublication": { "id": "proceedings/ipps/1995/7074/0", "title": "Proceedings of 9th International Parallel Processing Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/frontiers/1995/6965/0/69650238", "title": "An optimal parallel 
algorithm for volume ray casting", "doi": null, "abstractUrl": "/proceedings-article/frontiers/1995/69650238/12OmNxisQY8", "parentPublication": { "id": "proceedings/frontiers/1995/6965/0", "title": "Frontiers of Massively Parallel Processing, Symposium on the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1990/2083/0/00146394", "title": "Superposing images with shadow casting", "doi": null, "abstractUrl": "/proceedings-article/visual/1990/00146394/12OmNya72pN", "parentPublication": { "id": "proceedings/visual/1990/2083/0", "title": "1990 First IEEE Conference on Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2002/7498/0/7498mora", "title": "A New Object-Order Ray-Casting Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2002/7498mora/12OmNyyeWwh", "parentPublication": { "id": "proceedings/ieee-vis/2002/7498/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/03/ttg2008030603", "title": "Interactive High-Resolution Isosurface Ray Casting on Multicore Processors", "doi": null, "abstractUrl": "/journal/tg/2008/03/ttg2008030603/13rRUEgs2LW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011121795", "title": "Extinction-Based Shading and Illumination in GPU Volume Ray-Casting", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011121795/13rRUwkxc5o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061571", "title": "Volume Ray Casting with Peak Finding and Differential 
Sampling", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061571/13rRUxBa55W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1999/04/v0322", "title": "Fast Projection-Based Ray-Casting Algorithm for Rendering Curvilinear Volumes", "doi": null, "abstractUrl": "/journal/tg/1999/04/v0322/13rRUyY294r", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzl3WWZ", "title": "2013 17th International Conference on Information Visualisation", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNBNM8Ua", "doi": "10.1109/IV.2013.11", "title": "Voronoi-Based Label Placement for Metro Maps", "normalizedTitle": "Voronoi-Based Label Placement for Metro Maps", "abstract": "Metro maps with thumbnail photographs serve as common travel guides for providing sufficient information to meet the requirements of travelers in the cities. However, conventional methods attempt to minimize the total distance between stations and labels while maximizing the number of the labels rather than further taking into account the overall balance of the spatial distribution of labels. This paper presents an entropy-based approach for effectively annotating large annotation labels sufficiently close to the metro stations. Our idea is to decompose the entire labeling space intro regions bounded by the metro lines, and then further partition each region into Voronoi cells, each of which is reserved for a station to be annotated. This is accomplished by incorporating a new genetic-based optimization, while the fitness of the decomposition is evaluated by the entropy of the relative coverage ratios of such Voronoi cells. We also include several design examples to demonstrate that the proposed approach successfully distributes large labels around the metro network with minimal user intervention.", "abstracts": [ { "abstractType": "Regular", "content": "Metro maps with thumbnail photographs serve as common travel guides for providing sufficient information to meet the requirements of travelers in the cities. 
However, conventional methods attempt to minimize the total distance between stations and labels while maximizing the number of the labels rather than further taking into account the overall balance of the spatial distribution of labels. This paper presents an entropy-based approach for effectively annotating large annotation labels sufficiently close to the metro stations. Our idea is to decompose the entire labeling space intro regions bounded by the metro lines, and then further partition each region into Voronoi cells, each of which is reserved for a station to be annotated. This is accomplished by incorporating a new genetic-based optimization, while the fitness of the decomposition is evaluated by the entropy of the relative coverage ratios of such Voronoi cells. We also include several design examples to demonstrate that the proposed approach successfully distributes large labels around the metro network with minimal user intervention.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Metro maps with thumbnail photographs serve as common travel guides for providing sufficient information to meet the requirements of travelers in the cities. However, conventional methods attempt to minimize the total distance between stations and labels while maximizing the number of the labels rather than further taking into account the overall balance of the spatial distribution of labels. This paper presents an entropy-based approach for effectively annotating large annotation labels sufficiently close to the metro stations. Our idea is to decompose the entire labeling space intro regions bounded by the metro lines, and then further partition each region into Voronoi cells, each of which is reserved for a station to be annotated. This is accomplished by incorporating a new genetic-based optimization, while the fitness of the decomposition is evaluated by the entropy of the relative coverage ratios of such Voronoi cells. 
We also include several design examples to demonstrate that the proposed approach successfully distributes large labels around the metro network with minimal user intervention.", "fno": "5049a096", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Hsiang-Yun Wu", "givenName": "Hsiang-Yun", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shigeo Takahashi", "givenName": "Shigeo", "surname": "Takahashi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chun-Cheng Lin", "givenName": "Chun-Cheng", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hsu-Chun Yen", "givenName": "Hsu-Chun", "surname": "Yen", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-07-01T00:00:00", "pubType": "proceedings", "pages": "96-101", "year": "2013", "issn": "1550-6037", "isbn": "978-0-7695-5049-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5049a086", "articleId": "12OmNCy2L2I", "__typename": "AdjacentArticleType" }, "next": { "fno": "5049a102", "articleId": "12OmNwM6zXs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wowmom/2014/4786/0/06918958", "title": "Crowdsourced smartphone sensing for localization in metro trains", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2014/06918958/12OmNwDSdnE", "parentPublication": { "id": "proceedings/wowmom/2014/4786/0", "title": "2014 IEEE 15th International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/soca/2014/6833/0/6833a309", "title": "On Context Management Using Metro Maps", "doi": null, "abstractUrl": 
"/proceedings-article/soca/2014/6833a309/12OmNyaXPPp", "parentPublication": { "id": "proceedings/soca/2014/6833/0", "title": "2014 IEEE 7th International Conference on Service-Oriented Computing and Applications (SOCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a009", "title": "Designing and Annotating Metro Maps with Loop Lines", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a009/12OmNylKAKN", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050626", "title": "Drawing and Labeling High-Quality Metro Maps by Mixed-Integer Programming", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050626/13rRUwdrdSw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/01/ttg2011010101", "title": "Automatic Metro Map Layout Using Multicriteria Optimization", "doi": null, "abstractUrl": "/journal/tg/2011/01/ttg2011010101/13rRUx0xPIA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/02/07102775", "title": "Interactive Metro Map Editing", "doi": null, "abstractUrl": "/journal/tg/2016/02/07102775/13rRUx0xPIL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122528", "title": "Focus+Context Metro Maps", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122528/13rRUyY294B", "parentPublication": { 
"id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esaic/2018/8028/0/802800a261", "title": "Evaluation of Nanchang Metro Planning Based on Spatial Syntax", "doi": null, "abstractUrl": "/proceedings-article/esaic/2018/802800a261/17D45Xi9rWd", "parentPublication": { "id": "proceedings/esaic/2018/8028/0", "title": "2018 International Conference on Engineering Simulation and Intelligent Control (ESAIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/10107812", "title": "Spatio-Temporal Dynamic Graph Relation Learning for Urban Metro Flow Prediction", "doi": null, "abstractUrl": "/journal/tk/5555/01/10107812/1MDGjfriXp6", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09224192", "title": "MetroSets: Visualizing Sets as Metro Maps", "doi": null, "abstractUrl": "/journal/tg/2021/02/09224192/1nV7Me0F3Lq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxvO07E", "title": "2014 International Conference on Big Data and Smart Computing (BIGCOMP)", "acronym": "bigcomp", "groupId": "1803439", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNvk7JXn", "doi": "10.1109/BIGCOMP.2014.6741398", "title": "Octilinear layouts for metro map visualization", "normalizedTitle": "Octilinear layouts for metro map visualization", "abstract": "Octilinear design has been a feature of metro maps since the last century. However, it is only recent that layout of metro maps has been automated. In this paper, we explore ways to automate the layout of metro maps. In addition, we review another trend of integrating annotations for aiding travelers, while maintaining the octilinear layout. Finally, as personal navigational devices and platforms emerge, we discuss dynamic applications of this design and its future direction.", "abstracts": [ { "abstractType": "Regular", "content": "Octilinear design has been a feature of metro maps since the last century. However, it is only recent that layout of metro maps has been automated. In this paper, we explore ways to automate the layout of metro maps. In addition, we review another trend of integrating annotations for aiding travelers, while maintaining the octilinear layout. Finally, as personal navigational devices and platforms emerge, we discuss dynamic applications of this design and its future direction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Octilinear design has been a feature of metro maps since the last century. However, it is only recent that layout of metro maps has been automated. In this paper, we explore ways to automate the layout of metro maps. In addition, we review another trend of integrating annotations for aiding travelers, while maintaining the octilinear layout. 
Finally, as personal navigational devices and platforms emerge, we discuss dynamic applications of this design and its future direction.", "fno": "06741398", "keywords": [], "authors": [ { "affiliation": "Dept. of Computer Science, KAIST, Korea", "fullName": "Pio Claudio", "givenName": "Pio", "surname": "Claudio", "__typename": "ArticleAuthorType" }, { "affiliation": "Division of Web Science and Technology and Dept. of Computer Science, KAIST, Korea", "fullName": "Sung-Eui Yoon", "givenName": null, "surname": "Sung-Eui Yoon", "__typename": "ArticleAuthorType" } ], "idPrefix": "bigcomp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-01-01T00:00:00", "pubType": "proceedings", "pages": "19-21", "year": "2014", "issn": null, "isbn": "978-1-4799-3919-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06741397", "articleId": "12OmNxR5UQN", "__typename": "AdjacentArticleType" }, "next": { "fno": "06741399", "articleId": "12OmNB0X8rI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/2004/2177/0/21770488", "title": "Getting to more Abstract Places using the Metro Map Metaphor", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770488/12OmNANBZs7", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. 
IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2013/5049/0/5049a096", "title": "Voronoi-Based Label Placement for Metro Maps", "doi": null, "abstractUrl": "/proceedings-article/iv/2013/5049a096/12OmNBNM8Ua", "parentPublication": { "id": "proceedings/iv/2013/5049/0", "title": "2013 17th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770355", "title": "Metro Map Layout Using Multicriteria Optimization", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770355/12OmNvJXeDm", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a009", "title": "Designing and Annotating Metro Maps with Loop Lines", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a009/12OmNylKAKN", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050626", "title": "Drawing and Labeling High-Quality Metro Maps by Mixed-Integer Programming", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050626/13rRUwdrdSw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/01/ttg2011010101", "title": "Automatic Metro Map Layout Using Multicriteria Optimization", "doi": null, "abstractUrl": "/journal/tg/2011/01/ttg2011010101/13rRUx0xPIA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization 
& Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/02/07102775", "title": "Interactive Metro Map Editing", "doi": null, "abstractUrl": "/journal/tg/2016/02/07102775/13rRUx0xPIL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122528", "title": "Focus+Context Metro Maps", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122528/13rRUyY294B", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esaic/2018/8028/0/802800a261", "title": "Evaluation of Nanchang Metro Planning Based on Spatial Syntax", "doi": null, "abstractUrl": "/proceedings-article/esaic/2018/802800a261/17D45Xi9rWd", "parentPublication": { "id": "proceedings/esaic/2018/8028/0", "title": "2018 International Conference on Engineering Simulation and Intelligent Control (ESAIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a167", "title": "Effects of Image Features and Task Complexity on Eye Movement while searching Metro Map routes", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a167/1KaH4YF3DRm", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 26th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwbcJ4l", "title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNAGw16e", "doi": "10.1109/ICCV.2011.6126303", "title": "Multiview 3D warps", "normalizedTitle": "Multiview 3D warps", "abstract": "Image registration and 3D reconstruction are fundamental computer vision and medical imaging problems. They are particularly challenging when the input data are images of a deforming body obtained by a single moving camera. We propose a new modelling framework, the multiview 3D warps. Existing models are twofold: they estimate inter-image warps which are often inconsistent between the different images and do not model the underlying 3D structure, or reconstruct just a sparse set of points. In contrast, our multiview 3D warps combine the advantages of both; they have an explicit 3D component and a set of 3D deformations combined with projection to 2D. They thus capture the dense deforming body's time-varying shape and camera pose. The advantages over the classical solutions are numerous: thanks to our feature-based estimation method for the multiview 3D warps, one can not only augment the original images but also retarget or clone the observed body's 3D deformations by changing the pose. Experimental results on simulated and real data are reported, confirming the advantages of our framework over existing methods.", "abstracts": [ { "abstractType": "Regular", "content": "Image registration and 3D reconstruction are fundamental computer vision and medical imaging problems. They are particularly challenging when the input data are images of a deforming body obtained by a single moving camera. We propose a new modelling framework, the multiview 3D warps. 
Existing models are twofold: they estimate inter-image warps which are often inconsistent between the different images and do not model the underlying 3D structure, or reconstruct just a sparse set of points. In contrast, our multiview 3D warps combine the advantages of both; they have an explicit 3D component and a set of 3D deformations combined with projection to 2D. They thus capture the dense deforming body's time-varying shape and camera pose. The advantages over the classical solutions are numerous: thanks to our feature-based estimation method for the multiview 3D warps, one can not only augment the original images but also retarget or clone the observed body's 3D deformations by changing the pose. Experimental results on simulated and real data are reported, confirming the advantages of our framework over existing methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Image registration and 3D reconstruction are fundamental computer vision and medical imaging problems. They are particularly challenging when the input data are images of a deforming body obtained by a single moving camera. We propose a new modelling framework, the multiview 3D warps. Existing models are twofold: they estimate inter-image warps which are often inconsistent between the different images and do not model the underlying 3D structure, or reconstruct just a sparse set of points. In contrast, our multiview 3D warps combine the advantages of both; they have an explicit 3D component and a set of 3D deformations combined with projection to 2D. They thus capture the dense deforming body's time-varying shape and camera pose. The advantages over the classical solutions are numerous: thanks to our feature-based estimation method for the multiview 3D warps, one can not only augment the original images but also retarget or clone the observed body's 3D deformations by changing the pose. 
Experimental results on simulated and real data are reported, confirming the advantages of our framework over existing methods.", "fno": "06126303", "keywords": [], "authors": [ { "affiliation": "Istituto Italiano di Tecnologia (IIT), Italy", "fullName": "Alessio Del Bue", "givenName": "Alessio", "surname": "Del Bue", "__typename": "ArticleAuthorType" }, { "affiliation": "ALCoV - ISIT, Université Clermont 1, Italy", "fullName": "Adrien Bartoli", "givenName": "Adrien", "surname": "Bartoli", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-11-01T00:00:00", "pubType": "proceedings", "pages": "675-682", "year": "2011", "issn": null, "isbn": "978-1-4577-1101-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06126439", "articleId": "12OmNBh8gWH", "__typename": "AdjacentArticleType" }, "next": { "fno": "06126222", "articleId": "12OmNxdVh2U", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032c326", "title": "SurfaceNet: An End-to-End 3D Neural Network for Multiview Stereopsis", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c326/12OmNB8TUfZ", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05539796", "title": "Quasi-dense 3D reconstruction using tensor-based multiview stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05539796/12OmNBVIUsH", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icce/2009/2558/0/05012285", "title": "3D access control system of multiview sequence contents", "doi": null, "abstractUrl": "/proceedings-article/icce/2009/05012285/12OmNBvkdlt", "parentPublication": { "id": "proceedings/icce/2009/2558/0", "title": "2009 Digest of Technical Papers International Conference on Consumer Electronics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a548", "title": "Fast Multiview 3D Scan Registration Using Planar Structures", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a548/12OmNwFid2v", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2006/2825/0/282500488", "title": "Multiview 3D Tracking with an Incrementally Constructed 3D Model", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/282500488/12OmNwJgAMj", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2009/2353/0/04959689", "title": "Combined image plus depth seam carving for multiview 3D images", "doi": null, "abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y", "parentPublication": { "id": "proceedings/icassp/2009/2353/0", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2006/2503/0/25030585", "title": "Automatic Feature Extraction for Multiview 3D Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2006/25030585/12OmNxbEtIW", "parentPublication": { 
"id": "proceedings/fg/2006/2503/0", "title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050642", "title": "Spatioangular Prefiltering for Multiview 3D Displays", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050642/13rRUwjGoLC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/08/ttg2011081082", "title": "Fusing Multiview and Photometric Stereo for 3D Reconstruction under Uncalibrated Illumination", "doi": null, "abstractUrl": "/journal/tg/2011/08/ttg2011081082/13rRUy0qnGh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b451", "title": "Urban Semantic 3D Reconstruction From Multiview Satellite Imagery", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b451/1iTvuw5QNaw", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwErpGQ", "title": "2010 Conference on Visual Media Production", "acronym": "cvmp", "groupId": "1003129", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNAoDhXO", "doi": "10.1109/CVMP.2010.23", "title": "Helium3D: A Laser-Based 3D Display with '3D+' Capability", "normalizedTitle": "Helium3D: A Laser-Based 3D Display with '3D+' Capability", "abstract": "In this paper we describe our latest research into 3D displays that do not require the wearing of special glasses (autostereoscopic), can provide 3D to several viewers who have a large degree of freedom of movement and direct a different image to every eye in the viewing field so that motion parallax (the ability to ‘look-around’ objects) and other interesting modes of operation are achieved (‘3D+’). The display operates by providing regions in the viewing field, referred to as exit pupils that follow the positions of the viewers’ eyes under the control of a multi-user head tracker. The display incorporates an RGB laser illumination source that illuminates a light engine. Light directions are controlled by a spatial light modulator and a front screen assembly incorporates a novel Gabor superlens. The principle of operation is explained and the construction of three iterations of the display is described.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we describe our latest research into 3D displays that do not require the wearing of special glasses (autostereoscopic), can provide 3D to several viewers who have a large degree of freedom of movement and direct a different image to every eye in the viewing field so that motion parallax (the ability to ‘look-around’ objects) and other interesting modes of operation are achieved (‘3D+’). 
The display operates by providing regions in the viewing field, referred to as exit pupils that follow the positions of the viewers’ eyes under the control of a multi-user head tracker. The display incorporates an RGB laser illumination source that illuminates a light engine. Light directions are controlled by a spatial light modulator and a front screen assembly incorporates a novel Gabor superlens. The principle of operation is explained and the construction of three iterations of the display is described.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we describe our latest research into 3D displays that do not require the wearing of special glasses (autostereoscopic), can provide 3D to several viewers who have a large degree of freedom of movement and direct a different image to every eye in the viewing field so that motion parallax (the ability to ‘look-around’ objects) and other interesting modes of operation are achieved (‘3D+’). The display operates by providing regions in the viewing field, referred to as exit pupils that follow the positions of the viewers’ eyes under the control of a multi-user head tracker. The display incorporates an RGB laser illumination source that illuminates a light engine. Light directions are controlled by a spatial light modulator and a front screen assembly incorporates a novel Gabor superlens. 
The principle of operation is explained and the construction of three iterations of the display is described.", "fno": "4268a123", "keywords": [ "Autostereoscopic", "Liquid Crystal Display LCD", "Red Green Blue RGB Laser", "Liquid Crystal On Silicon LCOS", "Spatial Light Modulator SLM" ], "authors": [ { "affiliation": null, "fullName": "Rajwinder Singh Brar", "givenName": "Rajwinder Singh", "surname": "Brar", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Phil Surman", "givenName": "Phil", "surname": "Surman", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ian Sexton", "givenName": "Ian", "surname": "Sexton", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Klaus Hopf", "givenName": "Klaus", "surname": "Hopf", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvmp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-11-01T00:00:00", "pubType": "proceedings", "pages": "123-130", "year": "2010", "issn": null, "isbn": "978-0-7695-4268-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4268a114", "articleId": "12OmNC8MsMO", "__typename": "AdjacentArticleType" }, "next": { "fno": "4268a131", "articleId": "12OmNrJAe2o", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cw/2010/4215/0/4215a325", "title": "A System for Capturing, Rendering and Multiplexing Images on Multi-view Autostereoscopic Display", "doi": null, "abstractUrl": "/proceedings-article/cw/2010/4215a325/12OmNBv2CkF", "parentPublication": { "id": "proceedings/cw/2010/4215/0", "title": "2010 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apcip/2009/3699/2/3699b031", "title": "3D Multi-view Autostereoscopic Display and Its Key 
Technologie", "doi": null, "abstractUrl": "/proceedings-article/apcip/2009/3699b031/12OmNvAS4pe", "parentPublication": { "id": "proceedings/apcip/2009/3699/1", "title": "Information Processing, Asia-Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2006/2602/0/26020778", "title": "A Projection-Based Multi-view Time-multiplexed Autostereoscopic 3D Display System", "doi": null, "abstractUrl": "/proceedings-article/iv/2006/26020778/12OmNwDSdGX", "parentPublication": { "id": "proceedings/iv/2006/2602/0", "title": "Tenth International Conference on Information Visualisation (IV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinis/2009/3852/0/3852a397", "title": "Research of a High-Resolution Volumetric 3D Display System", "doi": null, "abstractUrl": "/proceedings-article/icinis/2009/3852a397/12OmNwtn3v2", "parentPublication": { "id": "proceedings/icinis/2009/3852/0", "title": "Intelligent Networks and Intelligent Systems, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2013/5050/0/5050a757", "title": "Display Sharp 3D Images in the Air Using Low Resolution Liquid Crystal Panels", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a757/12OmNzZWbzd", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2005/08/r8031", "title": "Autostereoscopic 3D Displays", "doi": null, "abstractUrl": "/magazine/co/2005/08/r8031/13rRUB7a16j", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050642", "title": "Spatioangular Prefiltering for Multiview 3D 
Displays", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050642/13rRUwjGoLC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/11/ttg2011111690", "title": "Autostereoscopic 3D Display with Long Visualization Depth Using Referential Viewing Area-Based Integral Photography", "doi": null, "abstractUrl": "/journal/tg/2011/11/ttg2011111690/13rRUyfKIHG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a387", "title": "Light Field Display: An Adaptive Weighted Dual-Layer LCD Display for Multiple Views", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a387/1ap5x2N7jP2", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797796", "title": "Full Parallax Table Top 3D Display Using Visually Equivalent Light Field", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797796/1cJ1cj63M3u", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy6qfNS", "title": "Information Processing, Asia-Pacific Conference on", "acronym": "apcip", "groupId": "1002833", "volume": "2", "displayVolume": "2", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNvAS4pe", "doi": "10.1109/APCIP.2009.144", "title": "3D Multi-view Autostereoscopic Display and Its Key Technologie", "normalizedTitle": "3D Multi-view Autostereoscopic Display and Its Key Technologie", "abstract": "multi-view autostereoscopic three-dimensional (3D) display has been one of the most popular research issues in the world. And 3D displays are classified into two types: stereoscopic 3D display and autostereoscopic 3D display. The former includes head mount system, anaglyph system, polarized filter system, field sequential system and so on. The latter is also called true 3D display, it provides 3D perception without the need for special glasses or other head gear. In this paper we research on the multi-view autostereoscopic 3D display. Focusing on the multi-view stereoscopic 3D key technologies, the latest international development trends and existing problems is analyzed. a multi-view stereoscopic display system, related key technologies are detailed, which includes: Light field representation model and light field capturing system, high efficiency multi-view video coding and transmission method compatible with current video standard, high efficiency rendering method for arbitrary position view at the decoder, 3D display technologies and multi-view autostereoscopic display.", "abstracts": [ { "abstractType": "Regular", "content": "multi-view autostereoscopic three-dimensional (3D) display has been one of the most popular research issues in the world. And 3D displays are classified into two types: stereoscopic 3D display and autostereoscopic 3D display. The former includes head mount system, anaglyph system, polarized filter system, field sequential system and so on. 
The latter is also called true 3D display, it provides 3D perception without the need for special glasses or other head gear. In this paper we research on the multi-view autostereoscopic 3D display. Focusing on the multi-view stereoscopic 3D key technologies, the latest international development trends and existing problems is analyzed. a multi-view stereoscopic display system, related key technologies are detailed, which includes: Light field representation model and light field capturing system, high efficiency multi-view video coding and transmission method compatible with current video standard, high efficiency rendering method for arbitrary position view at the decoder, 3D display technologies and multi-view autostereoscopic display.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "multi-view autostereoscopic three-dimensional (3D) display has been one of the most popular research issues in the world. And 3D displays are classified into two types: stereoscopic 3D display and autostereoscopic 3D display. The former includes head mount system, anaglyph system, polarized filter system, field sequential system and so on. The latter is also called true 3D display, it provides 3D perception without the need for special glasses or other head gear. In this paper we research on the multi-view autostereoscopic 3D display. Focusing on the multi-view stereoscopic 3D key technologies, the latest international development trends and existing problems is analyzed. 
a multi-view stereoscopic display system, related key technologies are detailed, which includes: Light field representation model and light field capturing system, high efficiency multi-view video coding and transmission method compatible with current video standard, high efficiency rendering method for arbitrary position view at the decoder, 3D display technologies and multi-view autostereoscopic display.", "fno": "3699b031", "keywords": [ "Multi View", "Autostereoscopic Display", "Three Dimension Display 3 D Display", "Image Mosaic" ], "authors": [ { "affiliation": null, "fullName": "Yuhua Zhu", "givenName": "Yuhua", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tong Zhen", "givenName": "Tong", "surname": "Zhen", "__typename": "ArticleAuthorType" } ], "idPrefix": "apcip", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-07-01T00:00:00", "pubType": "proceedings", "pages": "31-35", "year": "2009", "issn": null, "isbn": "978-0-7695-3699-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3699b027", "articleId": "12OmNzt0IOT", "__typename": "AdjacentArticleType" }, "next": { "fno": "3699b036", "articleId": "12OmNwE9ODl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2001/0948/0/09480031", "title": "Immersive Autostereoscopic Display for Mutual Telexistence: TWISTER I (Telexistence Wide-angle Immersive STEReoscope model I)", "doi": null, "abstractUrl": "/proceedings-article/vr/2001/09480031/12OmNBJw9Ra", "parentPublication": { "id": "proceedings/vr/2001/0948/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2010/4030/2/4030b444", "title": "Virtual View Synthesis for Multi-view 3D Display", "doi": null, 
"abstractUrl": "/proceedings-article/cso/2010/4030b444/12OmNBkfRmx", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2010/4215/0/4215a325", "title": "A System for Capturing, Rendering and Multiplexing Images on Multi-view Autostereoscopic Display", "doi": null, "abstractUrl": "/proceedings-article/cw/2010/4215a325/12OmNBv2CkF", "parentPublication": { "id": "proceedings/cw/2010/4215/0", "title": "2010 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2006/2602/0/26020778", "title": "A Projection-Based Multi-view Time-multiplexed Autostereoscopic 3D Display System", "doi": null, "abstractUrl": "/proceedings-article/iv/2006/26020778/12OmNwDSdGX", "parentPublication": { "id": "proceedings/iv/2006/2602/0", "title": "Tenth International Conference on Information Visualisation (IV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2007/0907/0/04142851", "title": "Character Interaction System with Autostereoscopic Display and Range Sensor", "doi": null, "abstractUrl": "/proceedings-article/3dui/2007/04142851/12OmNwc3wsr", "parentPublication": { "id": "proceedings/3dui/2007/0907/0", "title": "2007 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2005/08/r8031", "title": "Autostereoscopic 3D Displays", "doi": null, "abstractUrl": "/magazine/co/2005/08/r8031/13rRUB7a16j", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2005/08/r8046", "title": "Computer-Generated Holography as a Generic Display Technology", "doi": null, "abstractUrl": "/magazine/co/2005/08/r8046/13rRUB7a16k", "parentPublication": { "id": "mags/co", "title": 
"Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/01/ttg2008010084", "title": "Toward the Light Field Display: Autostereoscopic Rendering via a Cluster of Projectors", "doi": null, "abstractUrl": "/journal/tg/2008/01/ttg2008010084/13rRUwI5TXu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/02/v0228", "title": "Correcting Interperspective Aliasing in Autostereoscopic Displays", "doi": null, "abstractUrl": "/journal/tg/2005/02/v0228/13rRUwgyOj8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/11/ttg2011111690", "title": "Autostereoscopic 3D Display with Long Visualization Depth Using Referential Viewing Area-Based Integral Photography", "doi": null, "abstractUrl": "/journal/tg/2011/11/ttg2011111690/13rRUyfKIHG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAkWva9", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "acronym": "icassp", "groupId": "1000002", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNxYbT4y", "doi": "10.1109/ICASSP.2009.4959689", "title": "Combined image plus depth seam carving for multiview 3D images", "normalizedTitle": "Combined image plus depth seam carving for multiview 3D images", "abstract": "Multiview 3D displays have to multiplex a set of views on a single LCD panel. Due to this, each view has to be downsampled by a considerable amount leading to loss of details. In this paper, we extend the seam carving technique for adaptive resizing of images. It is proposed that the depth information be used along with the image pixel intensity values for resizing. This results in better resized multiview images. It is clear from the results presented that the object structure is maintained when the proposed method is used as compared to vanilla seam carving.", "abstracts": [ { "abstractType": "Regular", "content": "Multiview 3D displays have to multiplex a set of views on a single LCD panel. Due to this, each view has to be downsampled by a considerable amount leading to loss of details. In this paper, we extend the seam carving technique for adaptive resizing of images. It is proposed that the depth information be used along with the image pixel intensity values for resizing. This results in better resized multiview images. It is clear from the results presented that the object structure is maintained when the proposed method is used as compared to vanilla seam carving.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multiview 3D displays have to multiplex a set of views on a single LCD panel. Due to this, each view has to be downsampled by a considerable amount leading to loss of details. 
In this paper, we extend the seam carving technique for adaptive resizing of images. It is proposed that the depth information be used along with the image pixel intensity values for resizing. This results in better resized multiview images. It is clear from the results presented that the object structure is maintained when the proposed method is used as compared to vanilla seam carving.", "fno": "04959689", "keywords": [], "authors": [ { "affiliation": "CALIT2, University of California, San Diego, 92093, USA", "fullName": "Vikas Ramachandra", "givenName": "Vikas", "surname": "Ramachandra", "__typename": "ArticleAuthorType" }, { "affiliation": "CALIT2, University of California, San Diego, 92093, USA", "fullName": "Matthias Zwicker", "givenName": "Matthias", "surname": "Zwicker", "__typename": "ArticleAuthorType" }, { "affiliation": "CALIT2, University of California, San Diego, 92093, USA", "fullName": "Truong Q. Nguyen", "givenName": "Truong Q.", "surname": "Nguyen", "__typename": "ArticleAuthorType" } ], "idPrefix": "icassp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-04-01T00:00:00", "pubType": "proceedings", "pages": "737-740", "year": "2009", "issn": null, "isbn": "978-1-4244-2353-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04959688", "articleId": "12OmNxHryh6", "__typename": "AdjacentArticleType" }, "next": { "fno": "04959690", "articleId": "12OmNwCsdyk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pdcat/2016/5081/0/07943389", "title": "Accumulative Energy-Based Seam Carving for Image Resizing", "doi": null, "abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1", "parentPublication": { "id": "proceedings/pdcat/2016/5081/0", "title": "2016 17th International Conference on Parallel and Distributed Computing, 
Applications and Technologies (PDCAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2009/5949/0/05407481", "title": "Fast seam carving using partial update and divide and conquer method", "doi": null, "abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t", "parentPublication": { "id": "proceedings/isspit/2009/5949/0", "title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2014/7615/0/07097861", "title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing", "doi": null, "abstractUrl": "/proceedings-article/icpads/2014/07097861/12OmNsbGvDS", "parentPublication": { "id": "proceedings/icpads/2014/7615/0", "title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2014/4311/0/4311a082", "title": "Seam Carving for Color-Plus-Depth 3D Image", "doi": null, "abstractUrl": "/proceedings-article/ism/2014/4311a082/12OmNwDj0Y7", "parentPublication": { "id": "proceedings/ism/2014/4311/0", "title": "2014 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2012/4745/0/4745a596", "title": "Image Resizing Based on Geometry Preservation with Seam Carving", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X", "parentPublication": { "id": "proceedings/trustcom/2012/4745/0", "title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2014/3435/0/3435a060", "title": "A Study of Image 
Retargeting Based on Seam Carving", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2014/3435a060/12OmNwswg2d", "parentPublication": { "id": "proceedings/icmtma/2014/3435/0", "title": "2014 Sixth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2008/2570/0/04607613", "title": "Content-aware image resizing using perceptual seam carving with human attention model", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a050", "title": "Reverse Seam Carving", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2013/5016/0/5016a675", "title": "Improved Adaptive Seam Carving for Image Retargeting", "doi": null, "abstractUrl": "/proceedings-article/icdma/2013/5016a675/12OmNzmclGc", "parentPublication": { "id": "proceedings/icdma/2013/5016/0", "title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05540165", "title": "Discontinuous seam-carving for video retargeting", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05540165/12OmNzwZ6vw", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyaXPPL", "title": "2013 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNxwncHd", "doi": "10.1109/3DV.2013.56", "title": "Geometric and Color Calibration of Multiview Panoramic Cameras for Life-Size 3D Immersive Video", "normalizedTitle": "Geometric and Color Calibration of Multiview Panoramic Cameras for Life-Size 3D Immersive Video", "abstract": "In this paper we address calibration of camera arrays for life-size 3D video acquisition and display where mosaicking and multiviewpoint stereo are combined to provide an immersive experience which, by its size, resolution, and three-dimensionality, is meant to rival being there. This coupling of multiview and mosaicking requires integration of numerous synchronized video streams in a single presentation, aligned for both panoramic blending and epipolar rectification. The calibration framework we have developed extends the classical checkerboard approach through a modular multi-stage pipeline performing global optimization across intrinsics, extrinsics, panoramas, multiview epipolar alignments, and color correction. We demonstrate the methodology on several multiview camera arrays with various configurations aimed at mosaicking and epipolar light-field analysis. The results of this analysis have driven real-time life-sized panoramic 3D displays of captured events such as a concert, a fashion show, and sports activity.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we address calibration of camera arrays for life-size 3D video acquisition and display where mosaicking and multiviewpoint stereo are combined to provide an immersive experience which, by its size, resolution, and three-dimensionality, is meant to rival being there. 
This coupling of multiview and mosaicking requires integration of numerous synchronized video streams in a single presentation, aligned for both panoramic blending and epipolar rectification. The calibration framework we have developed extends the classical checkerboard approach through a modular multi-stage pipeline performing global optimization across intrinsics, extrinsics, panoramas, multiview epipolar alignments, and color correction. We demonstrate the methodology on several multiview camera arrays with various configurations aimed at mosaicking and epipolar light-field analysis. The results of this analysis have driven real-time life-sized panoramic 3D displays of captured events such as a concert, a fashion show, and sports activity.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we address calibration of camera arrays for life-size 3D video acquisition and display where mosaicking and multiviewpoint stereo are combined to provide an immersive experience which, by its size, resolution, and three-dimensionality, is meant to rival being there. This coupling of multiview and mosaicking requires integration of numerous synchronized video streams in a single presentation, aligned for both panoramic blending and epipolar rectification. The calibration framework we have developed extends the classical checkerboard approach through a modular multi-stage pipeline performing global optimization across intrinsics, extrinsics, panoramas, multiview epipolar alignments, and color correction. We demonstrate the methodology on several multiview camera arrays with various configurations aimed at mosaicking and epipolar light-field analysis. 
The results of this analysis have driven real-time life-sized panoramic 3D displays of captured events such as a concert, a fashion show, and sports activity.", "fno": "5067a374", "keywords": [ "Multiview Rectification", "Camera Calibration", "Epipolar Light Field Analysis", "Mosaicking", "Multiview Imaging" ], "authors": [ { "affiliation": null, "fullName": "Gregorij Kurillo", "givenName": "Gregorij", "surname": "Kurillo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Harlyn Baker", "givenName": "Harlyn", "surname": "Baker", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zeyu Li", "givenName": "Zeyu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ruzena Bajcsy", "givenName": "Ruzena", "surname": "Bajcsy", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-06-01T00:00:00", "pubType": "proceedings", "pages": "374-381", "year": "2013", "issn": null, "isbn": "978-0-7695-5067-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5067a366", "articleId": "12OmNBqMDtB", "__typename": "AdjacentArticleType" }, "next": { "fno": "5067a382", "articleId": "12OmNyKa6gn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2001/1143/1/114310034", "title": "Optimal Motion Estimation from Multiview Normalized Epipolar Constraint", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114310034/12OmNAL3Bbb", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/omnivis/2002/1629/0/16290045", "title": "Calibration of Panoramic Catadioptric Sensors Made 
Easier", "doi": null, "abstractUrl": "/proceedings-article/omnivis/2002/16290045/12OmNro0I9H", "parentPublication": { "id": "proceedings/omnivis/2002/1629/0", "title": "Omnidirectional Vision, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/omnivis/2002/1629/0/16290087", "title": "Multiview Panoramic Cameras Using a Mirror Pyramid", "doi": null, "abstractUrl": "/proceedings-article/omnivis/2002/16290087/12OmNvDZEVp", "parentPublication": { "id": "proceedings/omnivis/2002/1629/0", "title": "Omnidirectional Vision, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06238906", "title": "Capture considerations for multiview panoramic cameras", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06238906/12OmNyQYtuh", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/00937496", "title": "Optimal motion estimation from multiview normalized epipolar constraint", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/00937496/12OmNz5JCcf", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2000/0662/1/06621208", "title": "Cameras for Stereo Panoramic Imaging", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2000/06621208/12OmNzzP5EU", "parentPublication": { "id": "proceedings/cvpr/2000/0662/1", "title": "Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. 
No.PR00662)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/1997/7761/0/00582103", "title": "Compression comparisons for multiview stereo", "doi": null, "abstractUrl": "/proceedings-article/dcc/1997/00582103/1dPodExh8NW", "parentPublication": { "id": "proceedings/dcc/1997/7761/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a753", "title": "MONET: Multiview Semi-Supervised Keypoint Detection via Epipolar Divergence", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a753/1hVlOob3kxq", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCaLEnn", "title": "2012 IEEE Symposium on Computers and Communications (ISCC)", "acronym": "iscc", "groupId": "1000156", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNBNM959", "doi": "10.1109/ISCC.2012.6249380", "title": "Automatic classification of cross-site scripting in web pages using document-based and URL-based features", "normalizedTitle": "Automatic classification of cross-site scripting in web pages using document-based and URL-based features", "abstract": "The structure of dynamic websites comprised of a set of objects such as HTML tags, script functions, hyperlinks and advanced features in browsers lead to numerous resources and interactiveness in services currently provided on the Internet. However, these features have also increased security risks and attacks since they allow malicious codes injection or XSS (Cross-Site Scripting). XSS remains at the top of the lists of the greatest threats to web applications in recent years. This paper presents the experimental results obtained on XSS automatic classification in web pages using Machine Learning techniques. We focus on features extracted from web document content and URL. Our results demonstrate that the proposed features lead to highly accurate classification of malicious page.", "abstracts": [ { "abstractType": "Regular", "content": "The structure of dynamic websites comprised of a set of objects such as HTML tags, script functions, hyperlinks and advanced features in browsers lead to numerous resources and interactiveness in services currently provided on the Internet. However, these features have also increased security risks and attacks since they allow malicious codes injection or XSS (Cross-Site Scripting). XSS remains at the top of the lists of the greatest threats to web applications in recent years. 
This paper presents the experimental results obtained on XSS automatic classification in web pages using Machine Learning techniques. We focus on features extracted from web document content and URL. Our results demonstrate that the proposed features lead to highly accurate classification of malicious page.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The structure of dynamic websites comprised of a set of objects such as HTML tags, script functions, hyperlinks and advanced features in browsers lead to numerous resources and interactiveness in services currently provided on the Internet. However, these features have also increased security risks and attacks since they allow malicious codes injection or XSS (Cross-Site Scripting). XSS remains at the top of the lists of the greatest threats to web applications in recent years. This paper presents the experimental results obtained on XSS automatic classification in web pages using Machine Learning techniques. We focus on features extracted from web document content and URL. 
Our results demonstrate that the proposed features lead to highly accurate classification of malicious page.", "fno": "IS281", "keywords": [ "Machine Learning", "Cross Site Scripting", "Scripting Languages Security", "Web Application Security" ], "authors": [], "idPrefix": "iscc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-07-01T00:00:00", "pubType": "proceedings", "pages": "000702-000707", "year": "2012", "issn": "1530-1346", "isbn": "978-1-4673-2712-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "IS273", "articleId": "12OmNAYGlnW", "__typename": "AdjacentArticleType" }, "next": { "fno": "IS282", "articleId": "12OmNyOq4Zl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iwsess/2009/3725/0/05068458", "title": "MUTEC: Mutation-based testing of Cross Site Scripting", "doi": null, "abstractUrl": "/proceedings-article/iwsess/2009/05068458/12OmNBtl1zc", "parentPublication": { "id": "proceedings/iwsess/2009/3725/0", "title": "Software Engineering for Secure Systems, ICSE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acsac/2008/3447/0/3447a335", "title": "XSSDS: Server-Side Detection of Cross-Site Scripting Attacks", "doi": null, "abstractUrl": "/proceedings-article/acsac/2008/3447a335/12OmNvlPkwq", "parentPublication": { "id": "proceedings/acsac/2008/3447/0", "title": "2008 Annual Computer Security Applications Conference (ACSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sp/2009/3633/0/3633a360", "title": "Secure Content Sniffing for Web Browsers, or How to Stop Papers from Reviewing Themselves", "doi": null, "abstractUrl": "/proceedings-article/sp/2009/3633a360/12OmNzlD9wr", "parentPublication": { "id": 
"proceedings/sp/2009/3633/0", "title": "2009 30th IEEE Symposium on Security and Privacy", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2012/03/mco2012030055", "title": "Defending against Cross-Site Scripting Attacks", "doi": null, "abstractUrl": "/magazine/co/2012/03/mco2012030055/13rRUwIF64x", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccnea/2022/9109/0/910900a154", "title": "Research on Cross-site Scripting Vulnerability of XSS Based on International Student Website", "doi": null, "abstractUrl": "/proceedings-article/iccnea/2022/910900a154/1HYv3WAQBMI", "parentPublication": { "id": "proceedings/iccnea/2022/9109/0", "title": "2022 International Conference on Computer Network, Electronic and Automation (ICCNEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issre/2022/5132/0/513200a355", "title": "A Sanitizer-centric Analysis to Detect Cross-Site Scripting in PHP Programs", "doi": null, "abstractUrl": "/proceedings-article/issre/2022/513200a355/1JhTKsEM7Xq", "parentPublication": { "id": "proceedings/issre/2022/5132/0", "title": "2022 IEEE 33rd International Symposium on Software Reliability Engineering (ISSRE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2019/0888/0/088800a122", "title": "Practical Combinatorial Testing for XSS Detection using Locally Optimized Attack Models", "doi": null, "abstractUrl": "/proceedings-article/icstw/2019/088800a122/1aDT84u8usw", "parentPublication": { "id": "proceedings/icstw/2019/0888/0", "title": "2019 IEEE International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cyberc/2019/2542/0/254200a071", "title": 
"Automatic XSS Detection and Automatic Anti-Anti-Virus Payload Generation", "doi": null, "abstractUrl": "/proceedings-article/cyberc/2019/254200a071/1gjS0ZhO4W4", "parentPublication": { "id": "proceedings/cyberc/2019/2542/0", "title": "2019 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/saner/2021/9630/0/963000a154", "title": "XSnare: Application-specific client-side cross-site scripting protection", "doi": null, "abstractUrl": "/proceedings-article/saner/2021/963000a154/1twfqpxOkKI", "parentPublication": { "id": "proceedings/saner/2021/9630/0", "title": "2021 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/euros&pw/2021/1012/0/999900a060", "title": "Adopting Trusted Types in ProductionWeb Frameworks to Prevent DOM-Based Cross-Site Scripting: A Case Study", "doi": null, "abstractUrl": "/proceedings-article/euros&pw/2021/999900a060/1y63jGaybCg", "parentPublication": { "id": "proceedings/euros&pw/2021/1012/0", "title": "2021 IEEE European Symposium on Security and Privacy Workshops (EuroS&PW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxuXcDx", "title": "Visual Languages, IEEE Symposium on", "acronym": "vl", "groupId": "1000793", "volume": "0", "displayVolume": "0", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNBl6EH3", "doi": "10.1109/VL.1998.706173", "title": "Multimedia Workshop: Exploring the Benefits of a Visual Scripting Language", "normalizedTitle": "Multimedia Workshop: Exploring the Benefits of a Visual Scripting Language", "abstract": "Multimedia programming environments are intuitive and easy to use for even the non-programmer. However, the textual scripting languages of these environments are difficult to use and require programming experience to code significant functionality. We pro pose a visual scripting language to help bridge the usability gap between the multimedia programming environment and its scripting language by cognitively simplifying the scripting task. Visual programming languages can be easier and faster to program in than textual languages, and visual code can be easier to understand. We explore our hypothesis by designing a multimedia programming environment with a visual scripting language.", "abstracts": [ { "abstractType": "Regular", "content": "Multimedia programming environments are intuitive and easy to use for even the non-programmer. However, the textual scripting languages of these environments are difficult to use and require programming experience to code significant functionality. We pro pose a visual scripting language to help bridge the usability gap between the multimedia programming environment and its scripting language by cognitively simplifying the scripting task. Visual programming languages can be easier and faster to program in than textual languages, and visual code can be easier to understand. 
We explore our hypothesis by designing a multimedia programming environment with a visual scripting language.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multimedia programming environments are intuitive and easy to use for even the non-programmer. However, the textual scripting languages of these environments are difficult to use and require programming experience to code significant functionality. We pro pose a visual scripting language to help bridge the usability gap between the multimedia programming environment and its scripting language by cognitively simplifying the scripting task. Visual programming languages can be easier and faster to program in than textual languages, and visual code can be easier to understand. We explore our hypothesis by designing a multimedia programming environment with a visual scripting language.", "fno": "87120280", "keywords": [ "Multimedia Programming Visual Scripting Language User Interface Design" ], "authors": [ { "affiliation": "Dalhousie University", "fullName": "Andrea M. Winn", "givenName": "Andrea M.", "surname": "Winn", "__typename": "ArticleAuthorType" }, { "affiliation": "Dalhousie University", "fullName": "Trevor J. Smedley", "givenName": "Trevor J.", "surname": "Smedley", "__typename": "ArticleAuthorType" } ], "idPrefix": "vl", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-09-01T00:00:00", "pubType": "proceedings", "pages": "280", "year": "1998", "issn": "1049-2615", "isbn": "0-8186-8712-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "87120270", "articleId": "12OmNzX6ctc", "__typename": "AdjacentArticleType" }, "next": { "fno": "87120288", "articleId": "12OmNx7G67Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvzJG4b", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "2", "displayVolume": "2", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNCbU2XL", "doi": "10.1109/ICME.2004.1394334", "title": "An extensible scripting language for interactive animation in a speech-enabled virtual environment", "normalizedTitle": "An extensible scripting language for interactive animation in a speech-enabled virtual environment", "abstract": "Character animations on most virtual environment systems are canned motions created off-line through motion capture techniques. The motions are then encoded and transmitted with a fixed format and played at the client side. In this paper, we have proposed an XML-based scripting language, called eXtensible Animation Markup Language (XAML), to describe interactive dialog-based animations. The language is designed to describe character animations at various command levels and to compose a new animation from existing animation clips. In addition, the language is extended to incorporate other dialog-based scripting language such as VoiceXML. We have implemented such a system in Java that can interpret the language and render 3D animations based on the user's interactive voice commands", "abstracts": [ { "abstractType": "Regular", "content": "Character animations on most virtual environment systems are canned motions created off-line through motion capture techniques. The motions are then encoded and transmitted with a fixed format and played at the client side. In this paper, we have proposed an XML-based scripting language, called eXtensible Animation Markup Language (XAML), to describe interactive dialog-based animations. The language is designed to describe character animations at various command levels and to compose a new animation from existing animation clips. 
In addition, the language is extended to incorporate other dialog-based scripting language such as VoiceXML. We have implemented such a system in Java that can interpret the language and render 3D animations based on the user's interactive voice commands", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Character animations on most virtual environment systems are canned motions created off-line through motion capture techniques. The motions are then encoded and transmitted with a fixed format and played at the client side. In this paper, we have proposed an XML-based scripting language, called eXtensible Animation Markup Language (XAML), to describe interactive dialog-based animations. The language is designed to describe character animations at various command levels and to compose a new animation from existing animation clips. In addition, the language is extended to incorporate other dialog-based scripting language such as VoiceXML. We have implemented such a system in Java that can interpret the language and render 3D animations based on the user's interactive voice commands", "fno": "01394334", "keywords": [ "Computer Animation", "Natural Language Interfaces", "Rendering Computer Graphics", "Speech Based User Interfaces", "Virtual Reality", "XML", "Extensible Scripting Language", "Interactive Animation", "Speech Enabled Virtual Environment", "Character Animations", "Virtual Environment Systems", "Off Line Created Canned Motions", "Motion Capture Techniques", "Encoded Motions", "Fixed Transmission Format", "XML Based Scripting Language", "E Xtensible Animation Markup Language", "XAML", "Interactive Dialog Based Animations", "Language Design", "Voice XML", "Command Levels", "Animation Clips", "Animation Composition", "Dialog Based Scripting Language", "Java", "Language Interpretation", "3 D Animation Rendering", "User Interactive Voice Commands", "Natural Languages", "Animation", "Virtual Environment", "Markup Languages", "Java", "Costs", 
"Computer Science", "Application Software", "Production", "XML" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Nat. Chengchi Univ., Taipei, Taiwan", "fullName": "Tsai-Yen Li", "givenName": null, "surname": "Tsai-Yen Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Nat. Chengchi Univ., Taipei, Taiwan", "fullName": "Mao-Yung Liao", "givenName": null, "surname": "Mao-Yung Liao", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Nat. Chengchi Univ., Taipei, Taiwan", "fullName": "Chun-Feng Liao", "givenName": null, "surname": "Chun-Feng Liao", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "851,852,853,854", "year": "2004", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01394333", "articleId": "12OmNxR5UPt", "__typename": "AdjacentArticleType" }, "next": { "fno": "01394335", "articleId": "12OmNBE7Mtn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2009/4800/0/05349524", "title": "EMBR: A realtime animation engine for interactive embodied agents", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349524/12OmNAJ4pdW", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1996/7588/0/75880184", "title": "AniLan --- An Animation Language", "doi": null, "abstractUrl": "/proceedings-article/ca/1996/75880184/12OmNAXglMn", "parentPublication": { "id": "proceedings/ca/1996/7588/0", "title": "Computer Animation", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wsc/1993/1381/0/00718043", "title": "Proof Animation: Better Animation for Your Simulation", "doi": null, "abstractUrl": "/proceedings-article/wsc/1993/00718043/12OmNBBQZoj", "parentPublication": { "id": "proceedings/wsc/1993/1381/0", "title": "Proceedings of 1993 Winter Simulation Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1994/6240/0/00323995", "title": "Integrating a scripting language into an interactive animation system", "doi": null, "abstractUrl": "/proceedings-article/ca/1994/00323995/12OmNButq8k", "parentPublication": { "id": "proceedings/ca/1994/6240/0", "title": "Proceedings of Computer Animation '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2016/4155/0/4155a058", "title": "An Empirical Study of Programming Paradigms for Animation", "doi": null, "abstractUrl": "/proceedings-article/chase/2016/4155a058/12OmNwBjP6H", "parentPublication": { "id": "proceedings/chase/2016/4155/0", "title": "2016 IEEE/ACM Cooperative and Human Aspects of Software Engineering (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2004/8696/0/86960027", "title": "Effect Lines for Specifying Animation Effects", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2004/86960027/12OmNxE2mTv", "parentPublication": { "id": "proceedings/vlhcc/2004/8696/0", "title": "Proceedings. 
2004 IEEE Symposium on Visual Languages and Human Centric Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2000/206/0/00870440", "title": "Graphical animation of behavior models", "doi": null, "abstractUrl": "/proceedings-article/icse/2000/00870440/12OmNxEjY9f", "parentPublication": { "id": "proceedings/icse/2000/206/0", "title": "Proceedings of International Conference on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmi/2002/1834/0/18340454", "title": "Multi-Modal Embodied Agents Scripting", "doi": null, "abstractUrl": "/proceedings-article/icmi/2002/18340454/12OmNxG1yL1", "parentPublication": { "id": "proceedings/icmi/2002/1834/0", "title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/alpit/2008/3273/0/3273a257", "title": "3D Visible Speech Animation Driven by Chinese Prosody Markup Language", "doi": null, "abstractUrl": "/proceedings-article/alpit/2008/3273a257/12OmNxwncJs", "parentPublication": { "id": "proceedings/alpit/2008/3273/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111915", "title": "A Statistical Quality Model for Data-Driven Speech Animation", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111915/13rRUIIVlkf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNylsZKp", "title": "Proceedings of Technology of Object-Oriented Languages and Systems - TOOLS 30 (Cat. No.PR00278)", "acronym": "tools", "groupId": "1000744", "volume": "0", "displayVolume": "0", "year": "1999", "__typename": "ProceedingType" }, "article": { "id": "12OmNCcbEkJ", "doi": "10.1109/TOOLS.1999.10038", "title": "Agents and Workflow -- An Intimate Connection, or Just Friends?", "normalizedTitle": "Agents and Workflow -- An Intimate Connection, or Just Friends?", "abstract": "This panel addresses the perspective that agents and workflow can be seen as an evolution of components and scripting. There are several ways in which agents can be used to perform or support a workflow, and several ways in which workflow can be used to orchestrate or control the interactions between agents. The panelists explore several of these connections and various applications of agents and workflow. They discuss some of these relationships between agents and workflow, and propose opportunities for research and practice.", "abstracts": [ { "abstractType": "Regular", "content": "This panel addresses the perspective that agents and workflow can be seen as an evolution of components and scripting. There are several ways in which agents can be used to perform or support a workflow, and several ways in which workflow can be used to orchestrate or control the interactions between agents. The panelists explore several of these connections and various applications of agents and workflow. They discuss some of these relationships between agents and workflow, and propose opportunities for research and practice.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This panel addresses the perspective that agents and workflow can be seen as an evolution of components and scripting. 
There are several ways in which agents can be used to perform or support a workflow, and several ways in which workflow can be used to orchestrate or control the interactions between agents. The panelists explore several of these connections and various applications of agents and workflow. They discuss some of these relationships between agents and workflow, and propose opportunities for research and practice.", "fno": "02780558", "keywords": [ "Components", "Scripting", "Workflow", "Agents" ], "authors": [ { "affiliation": "Hewlett-Packard Laboratories", "fullName": "Martin L. Griss", "givenName": "Martin L.", "surname": "Griss", "__typename": "ArticleAuthorType" }, { "affiliation": "Hewlett-Packard Laboratories", "fullName": "Quiming Chen", "givenName": "Quiming", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Endeavors Technology Incorporated", "fullName": "Gregory A. Bolcer", "givenName": "Gregory A.", "surname": "Bolcer", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Utah", "fullName": "Robert R. Kessler", "givenName": "Robert R.", "surname": "Kessler", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Massachusetts", "fullName": "Leon J. Osterweil", "givenName": "Leon J.", "surname": "Osterweil", "__typename": "ArticleAuthorType" } ], "idPrefix": "tools", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1999-08-01T00:00:00", "pubType": "proceedings", "pages": "558", "year": "1999", "issn": "1530-2067", "isbn": "0-7695-0462-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "02780532", "articleId": "12OmNvo67GP", "__typename": "AdjacentArticleType" }, "next": { "fno": "02780563", "articleId": "12OmNx207g4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyr8Ytl", "title": "22nd International Conference on Advanced Information Networking and Applications (aina 2008)", "acronym": "aina", "groupId": "1000008", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNqOwQBJ", "doi": "10.1109/AINA.2008.31", "title": "A Scripting Approach for Workflow of Agents", "normalizedTitle": "A Scripting Approach for Workflow of Agents", "abstract": "Agents could be applied on workflow system to provide automation and coordination of business process. However, multi-agent systems may be developed on an agent platform. The mental states of agents may be implemented in program codes relevant to the platform. The internal workflow of agents could be planed by using a script-like approach. By this way, users could command the agents to do task for them with less addressing on the implementation of agent codes. Therefore, a scripting approach is proposed in this paper for the internal workflow of mobile agents. All the work done by agents is abstracted to a task-level. The script could be used to handle the service access, the flow control and the cooperation among agents.", "abstracts": [ { "abstractType": "Regular", "content": "Agents could be applied on workflow system to provide automation and coordination of business process. However, multi-agent systems may be developed on an agent platform. The mental states of agents may be implemented in program codes relevant to the platform. The internal workflow of agents could be planed by using a script-like approach. By this way, users could command the agents to do task for them with less addressing on the implementation of agent codes. Therefore, a scripting approach is proposed in this paper for the internal workflow of mobile agents. All the work done by agents is abstracted to a task-level. 
The script could be used to handle the service access, the flow control and the cooperation among agents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Agents could be applied on workflow system to provide automation and coordination of business process. However, multi-agent systems may be developed on an agent platform. The mental states of agents may be implemented in program codes relevant to the platform. The internal workflow of agents could be planed by using a script-like approach. By this way, users could command the agents to do task for them with less addressing on the implementation of agent codes. Therefore, a scripting approach is proposed in this paper for the internal workflow of mobile agents. All the work done by agents is abstracted to a task-level. The script could be used to handle the service access, the flow control and the cooperation among agents.", "fno": "3095b049", "keywords": [ "Agent", "Workflow", "Scripting" ], "authors": [ { "affiliation": null, "fullName": "Guo-Ming Fang", "givenName": "Guo-Ming", "surname": "Fang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zeng-Wei Hong", "givenName": "Zeng-Wei", "surname": "Hong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jim-Min Lin", "givenName": "Jim-Min", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "aina", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-03-01T00:00:00", "pubType": "proceedings", "pages": "1049-1053", "year": "2008", "issn": "1550-445X", "isbn": "978-0-7695-3095-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3095b042", "articleId": "12OmNCbU2Tn", "__typename": "AdjacentArticleType" }, "next": { "fno": "3095b054", "articleId": "12OmNzUxOeH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/icita/2005/2316/1/231610209", "title": "Mechanism of Semantic Oriented Flexible Workflow", "doi": null, "abstractUrl": "/proceedings-article/icita/2005/231610209/12OmNAkWvFx", "parentPublication": { "id": "proceedings/icita/2005/2316/1", "title": "Proceedings. Third International Conference on Information Technology and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tools/1999/0278/0/02780558", "title": "Agents and Workflow -- An Intimate Connection, or Just Friends?", "doi": null, "abstractUrl": "/proceedings-article/tools/1999/02780558/12OmNCcbEkJ", "parentPublication": { "id": "proceedings/tools/1999/0278/0", "title": "Proceedings of Technology of Object-Oriented Languages and Systems - TOOLS 30 (Cat. No.PR00278)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccima/1999/0300/0/03000018", "title": "A Framework for Agent-Oriented Workflow in Agricultur", "doi": null, "abstractUrl": "/proceedings-article/iccima/1999/03000018/12OmNrYCXHs", "parentPublication": { "id": "proceedings/iccima/1999/0300/0", "title": "Computational Intelligence and Multimedia Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/time/2000/0756/0/07560031", "title": "Free Schedules for Free Agents in Workflow Systems", "doi": null, "abstractUrl": "/proceedings-article/time/2000/07560031/12OmNwIYZCT", "parentPublication": { "id": "proceedings/time/2000/0756/0", "title": "Temporal Representation and Reasoning, International Syposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciids/2009/3580/0/3580a351", "title": "An Agent-Based Workflow System for Assisting in IC Design", "doi": null, "abstractUrl": "/proceedings-article/aciids/2009/3580a351/12OmNxG1yCC", "parentPublication": 
{ "id": "proceedings/aciids/2009/3580/0", "title": "Intelligent Information and Database Systems, Asian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmi/2002/1834/0/18340454", "title": "Multi-Modal Embodied Agents Scripting", "doi": null, "abstractUrl": "/proceedings-article/icmi/2002/18340454/12OmNxG1yL1", "parentPublication": { "id": "proceedings/icmi/2002/1834/0", "title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcse/2009/3570/1/3570a314", "title": "A Reconfigurable Platform of Manufacturing Execution System Based on Workflow and Agent", "doi": null, "abstractUrl": "/proceedings-article/wcse/2009/3570a314/12OmNybfqVU", "parentPublication": { "id": "proceedings/wcse/2009/3570/1", "title": "2009 WRI World Congress on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edoc/2000/0865/0/08650038", "title": "A Workflow and Agent Based Platform for Service Provisioning", "doi": null, "abstractUrl": "/proceedings-article/edoc/2000/08650038/12OmNzFdt8n", "parentPublication": { "id": "proceedings/edoc/2000/0865/0", "title": "Proceedings Fourth International Enterprise Distributed Objects Computing Conference. 
EDOC2000", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compcon/1996/7414/0/74140451", "title": "Structured Workflow Management with Lotus Notes Release 4", "doi": null, "abstractUrl": "/proceedings-article/compcon/1996/74140451/12OmNzWx07K", "parentPublication": { "id": "proceedings/compcon/1996/7414/0", "title": "Computer Conference, IEEE International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2001/0981/0/00926195", "title": "Collaborative multi-agents for workflow management", "doi": null, "abstractUrl": "/proceedings-article/hicss/2001/00926195/12OmNzcPA5p", "parentPublication": { "id": "proceedings/hicss/2001/0981/2", "title": "Proceedings of the 34th Annual Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzVXNIs", "title": "Proceedings of the 34th Annual Hawaii International Conference on System Sciences", "acronym": "hicss", "groupId": "1000730", "volume": "3", "displayVolume": "3", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNrJAeg5", "doi": "10.1109/HICSS.2001.926319", "title": "A Language for the Rapid Prototyping of Mobile Evolving Agents", "normalizedTitle": "A Language for the Rapid Prototyping of Mobile Evolving Agents", "abstract": "This article presents SAL, a general purpose scripting language for the rapid development of distributed software agents seamlessly embedded a visual environment. Integrated facilities for dynamic visualization provide simple but powerful means for debugging and domain-oriented animation. SAL agents are arranged on a set of 2D worksheets, which can be distributed over different machines. An agent's program is defined by the means of tables specifying a set of state transition rules with a condition and a sequence of actions each. Beyond basic computation and communication, actions can dynamically modify the agent's depiction, its program, and spawn arbitrary processes. A couple of examples finally demonstrate SAL's applicability in various domains like electronic systems design and process management.", "abstracts": [ { "abstractType": "Regular", "content": "This article presents SAL, a general purpose scripting language for the rapid development of distributed software agents seamlessly embedded a visual environment. Integrated facilities for dynamic visualization provide simple but powerful means for debugging and domain-oriented animation. SAL agents are arranged on a set of 2D worksheets, which can be distributed over different machines. An agent's program is defined by the means of tables specifying a set of state transition rules with a condition and a sequence of actions each. 
Beyond basic computation and communication, actions can dynamically modify the agent's depiction, its program, and spawn arbitrary processes. A couple of examples finally demonstrate SAL's applicability in various domains like electronic systems design and process management.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article presents SAL, a general purpose scripting language for the rapid development of distributed software agents seamlessly embedded a visual environment. Integrated facilities for dynamic visualization provide simple but powerful means for debugging and domain-oriented animation. SAL agents are arranged on a set of 2D worksheets, which can be distributed over different machines. An agent's program is defined by the means of tables specifying a set of state transition rules with a condition and a sequence of actions each. Beyond basic computation and communication, actions can dynamically modify the agent's depiction, its program, and spawn arbitrary processes. A couple of examples finally demonstrate SAL's applicability in various domains like electronic systems design and process management.", "fno": "09813025", "keywords": [ "Agent Based Systems", "System Prototyping", "Synchronous Languages" ], "authors": [ { "affiliation": null, "fullName": "W. Mueller", "givenName": "W.", "surname": "Mueller", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "A. Meyer", "givenName": "A.", "surname": "Meyer", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "H. 
Zabel", "givenName": "H.", "surname": "Zabel", "__typename": "ArticleAuthorType" } ], "idPrefix": "hicss", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2001-01-01T00:00:00", "pubType": "proceedings", "pages": "3025", "year": "2001", "issn": "1530-1605", "isbn": "0-7695-0981-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09813024", "articleId": "12OmNxisR1y", "__typename": "AdjacentArticleType" }, "next": { "fno": "09813026", "articleId": "12OmNvDqsUt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/tai/1994/6785/0/00346404", "title": "Docile agents to process natural language", "doi": null, "abstractUrl": "/proceedings-article/tai/1994/00346404/12OmNArtheg", "parentPublication": { "id": "proceedings/tai/1994/6785/0", "title": "Proceedings Sixth International Conference on Tools with Artificial Intelligence. 
TAI 94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmpsac/1988/0873/0/00017206", "title": "Rapid prototyping using FRORL language", "doi": null, "abstractUrl": "/proceedings-article/cmpsac/1988/00017206/12OmNrAdsvx", "parentPublication": { "id": "proceedings/cmpsac/1988/0873/0", "title": "Proceedings COMPSAC 88: The Twelfth Annual International Computer Software & Applications Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmas/2000/0625/0/06250365", "title": "Integrated Individual and Social Reasoning Models for Organizational Agents", "doi": null, "abstractUrl": "/proceedings-article/icmas/2000/06250365/12OmNwE9OE3", "parentPublication": { "id": "proceedings/icmas/2000/0625/0", "title": "Multi-Agent Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/depend/2010/4090/0/4090a139", "title": "Coordination and Deployment of Mobile Agents on Dependable Systems", "doi": null, "abstractUrl": "/proceedings-article/depend/2010/4090a139/12OmNwNOaSu", "parentPublication": { "id": "proceedings/depend/2010/4090/0", "title": "Dependability, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2001/0981/0/00926319", "title": "A language for the rapid prototyping of mobile evolving agents", "doi": null, "abstractUrl": "/proceedings-article/hicss/2001/00926319/12OmNx7XGZC", "parentPublication": { "id": "proceedings/hicss/2001/0981/2", "title": "Proceedings of the 34th Annual Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wesaac/2011/4645/0/4645a091", "title": "Computational Agents Applied to Dengue Fever Simulation", "doi": null, "abstractUrl": 
"/proceedings-article/wesaac/2011/4645a091/12OmNxzMo1A", "parentPublication": { "id": "proceedings/wesaac/2011/4645/0", "title": "Agent Systems, their Environment and Applications, Workshop and School of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iat/2006/2748/0/04052913", "title": "Generic Command Interpretation Algorithms for Conversational Agents", "doi": null, "abstractUrl": "/proceedings-article/iat/2006/04052913/12OmNy1SFC9", "parentPublication": { "id": "proceedings/iat/2006/2748/0", "title": "2006 IEEE/WIC/ACM International Conference on Intelligent Agent Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iat/2003/1931/0/19310378", "title": "Many is More, But Not Too Many: Dimensions of Cooperation of Agents with and without Predictive Capabilities", "doi": null, "abstractUrl": "/proceedings-article/iat/2003/19310378/12OmNylsZOx", "parentPublication": { "id": "proceedings/iat/2003/1931/0", "title": "Intelligent Agent Technology, IEEE / WIC / ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rsp/1996/7603/0/76030061", "title": "Rapid protocol prototyping from message sequence chart based specification", "doi": null, "abstractUrl": "/proceedings-article/rsp/1996/76030061/12OmNyqiaSu", "parentPublication": { "id": "proceedings/rsp/1996/7603/0", "title": "Rapid System Prototyping, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2020/5237/0/523700a459", "title": "Partner Selection for Agents: A Utility Theory Approach", "doi": null, "abstractUrl": "/proceedings-article/irc/2020/523700a459/1pP3SxDVbq0", "parentPublication": { "id": "proceedings/irc/2020/5237/0", "title": "2020 Fourth IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvA1hvp", "title": "Computer Vision, IEEE International Conference on", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNBE7Mrv", "doi": "10.1109/ICCV.1998.710849", "title": "3D Reconstruction with projective Octrees and Epipolar Geometry", "normalizedTitle": "3D Reconstruction with projective Octrees and Epipolar Geometry", "abstract": "In this paper, the problem of generating a 3D octree-like structure with the help of epipolar geometry within a projective framework is addressed. After a brief introduction on the basics of octrees and epipolar geometry, the new concept called \"projective octree\" is introduced together with an algorithm for building this projective structure. Finally, some results of the implementations are presented in the last section together with the conclusions and future work.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, the problem of generating a 3D octree-like structure with the help of epipolar geometry within a projective framework is addressed. After a brief introduction on the basics of octrees and epipolar geometry, the new concept called \"projective octree\" is introduced together with an algorithm for building this projective structure. Finally, some results of the implementations are presented in the last section together with the conclusions and future work.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, the problem of generating a 3D octree-like structure with the help of epipolar geometry within a projective framework is addressed. After a brief introduction on the basics of octrees and epipolar geometry, the new concept called \"projective octree\" is introduced together with an algorithm for building this projective structure. 
Finally, some results of the implementations are presented in the last section together with the conclusions and future work.", "fno": "82951067", "keywords": [], "authors": [ { "affiliation": "Universitat Politècnica de Catalunya", "fullName": "Blanca García", "givenName": "Blanca", "surname": "García", "__typename": "ArticleAuthorType" }, { "affiliation": "Universitat Politècnica de Catalunya", "fullName": "Pere Brunet", "givenName": "Pere", "surname": "Brunet", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-01-01T00:00:00", "pubType": "proceedings", "pages": "1067", "year": "1998", "issn": null, "isbn": "81-7319-221-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "82951061", "articleId": "12OmNz61d0W", "__typename": "AdjacentArticleType" }, "next": { "fno": "82951073", "articleId": "12OmNxGj9Uy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2009/3883/0/3883a496", "title": "Automatic 3D Ear Reconstruction Based on Epipolar Geometry", "doi": null, "abstractUrl": "/proceedings-article/icig/2009/3883a496/12OmNqH9hrw", "parentPublication": { "id": "proceedings/icig/2009/3883/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2006/2597/1/259711258", "title": "Epipolar Geometry of Central Projection Systems Using Veronese Maps", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2006/259711258/12OmNrMZpIR", "parentPublication": { "id": "proceedings/cvpr/2006/2597/2", "title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iccv/1995/7042/0/70420932", "title": "A comparison of projective reconstruction methods for pairs of views", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420932/12OmNvA1hbI", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/1/212810112", "title": "Epipolar Geometry Estimation via RANSAC Benefits from the Oriented Epipolar Constraint", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212810112/12OmNxGAKXl", "parentPublication": { "id": "proceedings/icpr/2004/2128/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/00937564", "title": "Cheirality in epipolar geometry", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/00937564/12OmNz6iOGN", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1999/0149/1/01491094", "title": "Projective Rectification Without Epipolar Geometry", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1999/01491094/12OmNzSh16b", "parentPublication": { "id": "proceedings/cvpr/1999/0149/2", "title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. 
No PR00149)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/114310548", "title": "Cheirality in Epipolar Geometry", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114310548/12OmNzsrwlj", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050669", "title": "Data-Parallel Octrees for Surface Reconstruction", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050669/13rRUxCitJ9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1994/10/i1036", "title": "Projective Reconstruction and Invariants from Multiple Images", "doi": null, "abstractUrl": "/journal/tp/1994/10/i1036/13rRUypp58v", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/07/09204467", "title": "Ray-Space Epipolar Geometry for Light Field Cameras", "doi": null, "abstractUrl": "/journal/tp/2022/07/09204467/1nkyUb2Y54k", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirg", "title": "Visualization Conference, IEEE", "acronym": "ieee-vis", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNwvVrCE", "doi": "10.1109/VISUAL.2004.101", "title": "Surface Reconstruction of Noisy and Defective Data Sets", "normalizedTitle": "Surface Reconstruction of Noisy and Defective Data Sets", "abstract": "We present a novel surface reconstruction algorithm that can recover high-quality surfaces from noisy and defective data sets without any normal or orientation information. A set of new techniques are introduced to afford extra noise tolerability, robust orientation alignment, reliable outlier removal, and satisfactory feature recovery. In our algorithm, sample points are first organized by an octree. The points are then clustered into a set of monolithically singly-oriented groups. The inside/outside orientation of each group is determined through a robust voting algorithm. We locally fit an implicit quadric surface in each octree cell. The locally fitted implicit surfaces are then blended to produce a signed distance field using the modified Shepard's method. We develop sophisticated iterative fitting algorithms to afford improved noise tolerance both in topology recognition and geometry accuracy. Furthermore, this iterative fitting algorithm, coupled with a local model selection scheme, provides a reliable sharp feature recovery mechanism even in the presence of bad input.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel surface reconstruction algorithm that can recover high-quality surfaces from noisy and defective data sets without any normal or orientation information. A set of new techniques are introduced to afford extra noise tolerability, robust orientation alignment, reliable outlier removal, and satisfactory feature recovery. In our algorithm, sample points are first organized by an octree. 
The points are then clustered into a set of monolithically singly-oriented groups. The inside/outside orientation of each group is determined through a robust voting algorithm. We locally fit an implicit quadric surface in each octree cell. The locally fitted implicit surfaces are then blended to produce a signed distance field using the modified Shepard's method. We develop sophisticated iterative fitting algorithms to afford improved noise tolerance both in topology recognition and geometry accuracy. Furthermore, this iterative fitting algorithm, coupled with a local model selection scheme, provides a reliable sharp feature recovery mechanism even in the presence of bad input.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel surface reconstruction algorithm that can recover high-quality surfaces from noisy and defective data sets without any normal or orientation information. A set of new techniques are introduced to afford extra noise tolerability, robust orientation alignment, reliable outlier removal, and satisfactory feature recovery. In our algorithm, sample points are first organized by an octree. The points are then clustered into a set of monolithically singly-oriented groups. The inside/outside orientation of each group is determined through a robust voting algorithm. We locally fit an implicit quadric surface in each octree cell. The locally fitted implicit surfaces are then blended to produce a signed distance field using the modified Shepard's method. We develop sophisticated iterative fitting algorithms to afford improved noise tolerance both in topology recognition and geometry accuracy. 
Furthermore, this iterative fitting algorithm, coupled with a local model selection scheme, provides a reliable sharp feature recovery mechanism even in the presence of bad input.", "fno": "87880259", "keywords": [ "Computer Graphics", "Surface Reconstruction", "Surface Representation", "MPU Implicits", "Modified Shepards Method" ], "authors": [ { "affiliation": "State University of New York at Stony Brook", "fullName": "Hui Xie", "givenName": "Hui", "surname": "Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "State University of New York at Stony Brook", "fullName": "Kevin T. McDonnell", "givenName": "Kevin T.", "surname": "McDonnell", "__typename": "ArticleAuthorType" }, { "affiliation": "State University of New York at Stony Brook", "fullName": "Hong Qin", "givenName": "Hong", "surname": "Qin", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-vis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-10-01T00:00:00", "pubType": "proceedings", "pages": "259-266", "year": "2004", "issn": null, "isbn": "0-7803-8788-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "87880251", "articleId": "12OmNBSjJ71", "__typename": "AdjacentArticleType" }, "next": { "fno": "87880267", "articleId": "12OmNsbGvEw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2003/2030/0/20300013", "title": "Piecewise C1 Continuous Surface Reconstruction of Noisy Point Clouds via Local Implicit Quadric Regression", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300013/12OmNApcukc", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2006/2825/0/04155754", "title": "Beyond 
Silhouettes: Surface Reconstruction Using Multi-Flash Photography", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/04155754/12OmNBa2iEF", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cadgraphics/2011/4497/0/4497a509", "title": "Implicit Surface Reconstruction Based on Adaptive Clustering", "doi": null, "abstractUrl": "/proceedings-article/cadgraphics/2011/4497a509/12OmNqJHFLk", "parentPublication": { "id": "proceedings/cadgraphics/2011/4497/0", "title": "Computer-Aided Design and Computer Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2005/9330/0/01500390", "title": "Surface reconstruction using oriented charges", "doi": null, "abstractUrl": "/proceedings-article/cgi/2005/01500390/12OmNwpoFK7", "parentPublication": { "id": "proceedings/cgi/2005/9330/0", "title": "Computer Graphics International 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1991/01/i0001", "title": "On Three-Dimensional Surface Reconstruction Methods", "doi": null, "abstractUrl": "/journal/tp/1991/01/i0001/13rRUwbs1Tn", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050669", "title": "Data-Parallel Octrees for Surface Reconstruction", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050669/13rRUxCitJ9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1512", "title": 
"Efficient Surface Reconstruction using Generalized Coulomb Potentials", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1512/13rRUy3xY84", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2006/02/i0328", "title": "Implicit Meshes for Surface Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2006/02/i0328/13rRUygT7o3", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09839681", "title": "SSRNet: Scalable 3D Surface Reconstruction Network", "doi": null, "abstractUrl": "/journal/tg/5555/01/09839681/1FisL8u19du", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800a967", "title": "SSRNet: Scalable 3D Surface Reconstruction Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800a967/1m3nKc80MlG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz2TCuM", "title": "SC Conference", "acronym": "sc", "groupId": "1000729", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNz6iOOi", "doi": "10.1145/1413370.1413389", "title": "Dendro: parallel algorithms for multigrid and AMR methods on 2:1 balanced octrees", "normalizedTitle": "Dendro: parallel algorithms for multigrid and AMR methods on 2:1 balanced octrees", "abstract": "In this article, we present Dendro, a suite of parallel algorithms for the discretization and solution of partial differential equations (PDEs) involving second-order elliptic operators. Dendro uses trilinear finite element discretizations constructed using octrees. Dendro, comprises four main modules: a bottom-up octree generation and 2:1 balancing module, a meshing module, a geometric multiplicative multigrid module, and a module for adaptive mesh refinement (AMR). Here, we focus on the multigrid and AMR modules. The key features of Dendro are coarsening/refinement, inter-octree transfers of scalar and vector fields, and parallel partition of multilevel octree forests. We describe a bottom-up algorithm for constructing the coarser multigrid levels. The input is an arbitrary 2:1 balanced octree-based mesh, representing the fine level mesh. The output is a set of octrees and meshes that are used in the multigrid sweeps. Also, we describe matrix-free implementations for the discretized PDE operators and the intergrid transfer operations. We present results on up to 4096 CPUs on the Cray XT3 (\"BigBen\"), the Intel 64 system (\"Abe\"), and the Sun Constellation Linux cluster (\"Ranger\").", "abstracts": [ { "abstractType": "Regular", "content": "In this article, we present Dendro, a suite of parallel algorithms for the discretization and solution of partial differential equations (PDEs) involving second-order elliptic operators. 
Dendro uses trilinear finite element discretizations constructed using octrees. Dendro, comprises four main modules: a bottom-up octree generation and 2:1 balancing module, a meshing module, a geometric multiplicative multigrid module, and a module for adaptive mesh refinement (AMR). Here, we focus on the multigrid and AMR modules. The key features of Dendro are coarsening/refinement, inter-octree transfers of scalar and vector fields, and parallel partition of multilevel octree forests. We describe a bottom-up algorithm for constructing the coarser multigrid levels. The input is an arbitrary 2:1 balanced octree-based mesh, representing the fine level mesh. The output is a set of octrees and meshes that are used in the multigrid sweeps. Also, we describe matrix-free implementations for the discretized PDE operators and the intergrid transfer operations. We present results on up to 4096 CPUs on the Cray XT3 (\"BigBen\"), the Intel 64 system (\"Abe\"), and the Sun Constellation Linux cluster (\"Ranger\").", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this article, we present Dendro, a suite of parallel algorithms for the discretization and solution of partial differential equations (PDEs) involving second-order elliptic operators. Dendro uses trilinear finite element discretizations constructed using octrees. Dendro, comprises four main modules: a bottom-up octree generation and 2:1 balancing module, a meshing module, a geometric multiplicative multigrid module, and a module for adaptive mesh refinement (AMR). Here, we focus on the multigrid and AMR modules. The key features of Dendro are coarsening/refinement, inter-octree transfers of scalar and vector fields, and parallel partition of multilevel octree forests. We describe a bottom-up algorithm for constructing the coarser multigrid levels. The input is an arbitrary 2:1 balanced octree-based mesh, representing the fine level mesh. 
The output is a set of octrees and meshes that are used in the multigrid sweeps. Also, we describe matrix-free implementations for the discretized PDE operators and the intergrid transfer operations. We present results on up to 4096 CPUs on the Cray XT3 (\"BigBen\"), the Intel 64 system (\"Abe\"), and the Sun Constellation Linux cluster (\"Ranger\").", "fno": "28350018", "keywords": [], "authors": [ { "affiliation": "Georgia Institute of Technology, Atlanta, GA", "fullName": "Rahul S. Sampath", "givenName": "Rahul S.", "surname": "Sampath", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Pennsylvania, Philadelphia, PA", "fullName": "Santi S. Adavani", "givenName": "Santi S.", "surname": "Adavani", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Pennsylvania, Philadelphia, PA", "fullName": "Hari Sundar", "givenName": "Hari", "surname": "Sundar", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology, Atlanta, GA", "fullName": "Ilya Lashuk", "givenName": "Ilya", "surname": "Lashuk", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology, Atlanta, GA", "fullName": "George Biros", "givenName": "George", "surname": "Biros", "__typename": "ArticleAuthorType" } ], "idPrefix": "sc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-11-01T00:00:00", "pubType": "proceedings", "pages": "1-12", "year": "2008", "issn": null, "isbn": "978-1-4244-2835-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05213771", "articleId": "12OmNxwncnW", "__typename": "AdjacentArticleType" }, "next": { "fno": "05218119", "articleId": "12OmNwF0C5b", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/1998/8295/0/82951067", "title": "3D Reconstruction with projective Octrees and 
Epipolar Geometry", "doi": null, "abstractUrl": "/proceedings-article/iccv/1998/82951067/12OmNBE7Mrv", "parentPublication": { "id": "proceedings/iccv/1998/8295/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2008/2835/0/28350062", "title": "Scalable adaptive mantle convection simulation on petascale supercomputers", "doi": null, "abstractUrl": "/proceedings-article/sc/2008/28350062/12OmNxXl5wP", "parentPublication": { "id": "proceedings/sc/2008/2835/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2012/0806/0/1000a045", "title": "Parallel geometric-algebraic multigrid on unstructured forests of octrees", "doi": null, "abstractUrl": "/proceedings-article/sc/2012/1000a045/12OmNy7Qfuf", "parentPublication": { "id": "proceedings/sc/2012/0806/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/3/252130950", "title": "Triangular Mesh Generation of Octrees of Non-Convex 3D Objects", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252130950/12OmNylKB4R", "parentPublication": { "id": "proceedings/icpr/2006/2521/3", "title": "2006 18th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2007/3764/0/37640025", "title": "Low-constant parallel algorithms for finite element simulations using linear octrees", "doi": null, "abstractUrl": "/proceedings-article/sc/2007/37640025/12OmNzDNtr8", "parentPublication": { "id": "proceedings/sc/2007/3764/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2008/2835/0/05218558", "title": "Dendro: Parallel algorithms for multigrid and AMR methods on 2:1 
balanced octrees", "doi": null, "abstractUrl": "/proceedings-article/sc/2008/05218558/12OmNzGlRBd", "parentPublication": { "id": "proceedings/sc/2008/2835/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1986/10/mcg1986100012", "title": "Viewing Transformations of Voxel-Based Objects Via Linear Octrees", "doi": null, "abstractUrl": "/magazine/cg/1986/10/mcg1986100012/13rRUxAAT9T", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050669", "title": "Data-Parallel Octrees for Surface Reconstruction", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050669/13rRUxCitJ9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/11/ttg2011111663", "title": "A Hexahedral Multigrid Approach for Simulating Cuts in Deformable Objects", "doi": null, "abstractUrl": "/journal/tg/2011/11/ttg2011111663/13rRUy0qnGi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2019/03/08451966", "title": "Persistent Octrees for Parallel Mesh Refinement through Non-Volatile Byte-Addressable Memory", "doi": null, "abstractUrl": "/journal/td/2019/03/08451966/17D45VsBU7z", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz2TCuM", "title": "SC Conference", "acronym": "sc", "groupId": "1000729", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNzGlRBd", "doi": "10.1109/SC.2008.5218558", "title": "Dendro: Parallel algorithms for multigrid and AMR methods on 2:1 balanced octrees", "normalizedTitle": "Dendro: Parallel algorithms for multigrid and AMR methods on 2:1 balanced octrees", "abstract": "In this article, we present Dendro, a suite of parallel algorithms for the discretization and solution of partial differential equations (PDEs) involving second-order elliptic operators. Dendro uses trilinear finite element discretizations constructed using octrees. Dendro, comprises four main modules: a bottom-up octree generation and 2:1 balancing module, a meshing module, a geometric multiplicative multigrid module, and a module for adaptive mesh refinement (AMR). Here, we focus on the multigrid and AMR modules. The key features of Dendro are coarsening/refinement, inter-octree transfers of scalar and vector fields, and parallel partition of multilevel octree forests. We describe a bottom-up algorithm for constructing the coarser multigrid levels. The input is an arbitrary 2:1 balanced octree-based mesh, representing the fine level mesh. The output is a set of octrees and meshes that are used in the multigrid sweeps. Also, we describe matrix-free implementations for the discretized PDE operators and the intergrid transfer operations. We present results on up to 4096 CPUs on the Cray XT3 (ldquoBigBenrdquo), the Intel 64 system (ldquoAberdquo), and the Sun Constellation Linux cluster (ldquoRangerrdquo).", "abstracts": [ { "abstractType": "Regular", "content": "In this article, we present Dendro, a suite of parallel algorithms for the discretization and solution of partial differential equations (PDEs) involving second-order elliptic operators. 
Dendro uses trilinear finite element discretizations constructed using octrees. Dendro, comprises four main modules: a bottom-up octree generation and 2:1 balancing module, a meshing module, a geometric multiplicative multigrid module, and a module for adaptive mesh refinement (AMR). Here, we focus on the multigrid and AMR modules. The key features of Dendro are coarsening/refinement, inter-octree transfers of scalar and vector fields, and parallel partition of multilevel octree forests. We describe a bottom-up algorithm for constructing the coarser multigrid levels. The input is an arbitrary 2:1 balanced octree-based mesh, representing the fine level mesh. The output is a set of octrees and meshes that are used in the multigrid sweeps. Also, we describe matrix-free implementations for the discretized PDE operators and the intergrid transfer operations. We present results on up to 4096 CPUs on the Cray XT3 (ldquoBigBenrdquo), the Intel 64 system (ldquoAberdquo), and the Sun Constellation Linux cluster (ldquoRangerrdquo).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this article, we present Dendro, a suite of parallel algorithms for the discretization and solution of partial differential equations (PDEs) involving second-order elliptic operators. Dendro uses trilinear finite element discretizations constructed using octrees. Dendro, comprises four main modules: a bottom-up octree generation and 2:1 balancing module, a meshing module, a geometric multiplicative multigrid module, and a module for adaptive mesh refinement (AMR). Here, we focus on the multigrid and AMR modules. The key features of Dendro are coarsening/refinement, inter-octree transfers of scalar and vector fields, and parallel partition of multilevel octree forests. We describe a bottom-up algorithm for constructing the coarser multigrid levels. The input is an arbitrary 2:1 balanced octree-based mesh, representing the fine level mesh. 
The output is a set of octrees and meshes that are used in the multigrid sweeps. Also, we describe matrix-free implementations for the discretized PDE operators and the intergrid transfer operations. We present results on up to 4096 CPUs on the Cray XT3 (ldquoBigBenrdquo), the Intel 64 system (ldquoAberdquo), and the Sun Constellation Linux cluster (ldquoRangerrdquo).", "fno": "05218558", "keywords": [ "Elliptic Equations", "Finite Element Analysis", "Octrees", "Parallel Algorithms", "Partial Differential Equations", "Dendro", "Parallel Algorithm", "AMR Method", "Balanced Octrees", "Partial Differential Equation", "Second Order Elliptic Operator", "Trilinear Finite Element Discretizations", "Bottom Up Octree Generation", "Meshing Module", "Geometric Multiplicative Multigrid Module", "Adaptive Mesh Refinement", "Interoctree Transfer", "Multilevel Octree Forest", "Fine Level Mesh", "Matrix Free Implementation", "PDE Operator", "Intergrid Transfer Operation", "Parallel Algorithms", "Adaptive Mesh Refinement", "Finite Element Methods", "Partitioning Algorithms", "Iterative Algorithms", "Multigrid Methods", "Partial Differential Equations", "Mesh Generation", "Sun", "Linux" ], "authors": [ { "affiliation": "Georgia Institute of Technology, Atlanta, 30332, USA", "fullName": "Rahul S. Sampath", "givenName": "Rahul S.", "surname": "Sampath", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Pennsylvania, Philadelphia, 19104, USA", "fullName": "Santi S. 
Adavani", "givenName": "Santi S.", "surname": "Adavani", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Pennsylvania, Philadelphia, 19104, USA", "fullName": "Hari Sundar", "givenName": "Hari", "surname": "Sundar", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology, Atlanta, 30332, USA", "fullName": "Ilya Lashuk", "givenName": "Ilya", "surname": "Lashuk", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology, Atlanta, 30332, USA", "fullName": "George Biros", "givenName": "George", "surname": "Biros", "__typename": "ArticleAuthorType" } ], "idPrefix": "sc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-11-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "2167-4329", "isbn": "978-1-4244-2835-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2835i", "articleId": "12OmNzVGcTx", "__typename": "AdjacentArticleType" }, "next": { "fno": "28350001", "articleId": "12OmNBSBk0h", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ipdps/2003/1926/0/19260058a", "title": "Vectorization of Multigrid Codes Using SIMD ISA Extensions", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2003/19260058a/12OmNqIQS8A", "parentPublication": { "id": "proceedings/ipdps/2003/1926/0", "title": "Parallel and Distributed Processing Symposium, International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2012/4675/0/4675a426", "title": "Low-Cost Parallel Algorithms for 2:1 Octree Balance", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2012/4675a426/12OmNx6g6mD", "parentPublication": { "id": "proceedings/ipdps/2012/4675/0", "title": "Parallel and Distributed Processing Symposium, International", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2012/0806/0/1000a045", "title": "Parallel geometric-algebraic multigrid on unstructured forests of octrees", "doi": null, "abstractUrl": "/proceedings-article/sc/2012/1000a045/12OmNy7Qfuf", "parentPublication": { "id": "proceedings/sc/2012/0806/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2008/2835/0/28350018", "title": "Dendro: parallel algorithms for multigrid and AMR methods on 2:1 balanced octrees", "doi": null, "abstractUrl": "/proceedings-article/sc/2008/28350018/12OmNz6iOOi", "parentPublication": { "id": "proceedings/sc/2008/2835/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2006/06/c6024", "title": "An Introduction to Algebraic Multigrid", "doi": null, "abstractUrl": "/magazine/cs/2006/06/c6024/13rRUwkxc1e", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2006/06/c6010", "title": "Guest Editors' Introduction: Multigrid Computing", "doi": null, "abstractUrl": "/magazine/cs/2006/06/c6010/13rRUxYIMQn", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2006/06/c6044", "title": "Multigrid Methods on Adaptively Refined Grids", "doi": null, "abstractUrl": "/magazine/cs/2006/06/c6044/13rRUxcsYPK", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/11/ttg2011111663", "title": "A Hexahedral Multigrid Approach for Simulating Cuts in Deformable Objects", "doi": null, "abstractUrl": 
"/journal/tg/2011/11/ttg2011111663/13rRUy0qnGi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2006/06/c6012", "title": "Why Multigrid Methods Are So Efficient", "doi": null, "abstractUrl": "/magazine/cs/2006/06/c6012/13rRUy2YLOO", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2019/1246/0/124600a101", "title": "Asynchronous Multigrid Methods", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2019/124600a101/1cYhNxfoynm", "parentPublication": { "id": "proceedings/ipdps/2019/1246/0", "title": "2019 IEEE International Parallel and Distributed Processing Symposium (IPDPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAkWvaO", "title": "Proceedings. Computer Graphics International 2001", "acronym": "cgi", "groupId": "1000132", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNzdoN6A", "doi": "10.1109/CGI.2001.934688", "title": "A Marching Voxels Method for Surface Rendering of Volume Data", "normalizedTitle": "A Marching Voxels Method for Surface Rendering of Volume Data", "abstract": "Abstract: The marching cubes method is a well-known surface extraction method by using the surface configurations of cubes for surface rendering of volume data. The marching cubes method has three main disadvantages, time consuming, ambiguity, and holes generation. All these disadvantages come from the use of the surface configurations of cubes. In this paper, we propose an efficient surface extraction method, the marching voxels method, for surface rendering of volume data. Instead of using the surface configurations of cubes, the marching voxels method first generates triangles for inner voxels. Then it combines the triangles of inner voxels to produce the surface of an object. Finally, the surface of an object is projected to a plane to form the final image. Since the marching voxels method considers the combination of triangles of voxels not cubes and the combination of triangles is performed in a deterministic way, there is neither ambiguous case of a combination nor holes for the generated surface. The experimental results show that the marching voxels method saves about 30% of the surface rendering time compared to the marching cubes method for test samples.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract: The marching cubes method is a well-known surface extraction method by using the surface configurations of cubes for surface rendering of volume data. The marching cubes method has three main disadvantages, time consuming, ambiguity, and holes generation. 
All these disadvantages come from the use of the surface configurations of cubes. In this paper, we propose an efficient surface extraction method, the marching voxels method, for surface rendering of volume data. Instead of using the surface configurations of cubes, the marching voxels method first generates triangles for inner voxels. Then it combines the triangles of inner voxels to produce the surface of an object. Finally, the surface of an object is projected to a plane to form the final image. Since the marching voxels method considers the combination of triangles of voxels not cubes and the combination of triangles is performed in a deterministic way, there is neither ambiguous case of a combination nor holes for the generated surface. The experimental results show that the marching voxels method saves about 30% of the surface rendering time compared to the marching cubes method for test samples.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract: The marching cubes method is a well-known surface extraction method by using the surface configurations of cubes for surface rendering of volume data. The marching cubes method has three main disadvantages, time consuming, ambiguity, and holes generation. All these disadvantages come from the use of the surface configurations of cubes. In this paper, we propose an efficient surface extraction method, the marching voxels method, for surface rendering of volume data. Instead of using the surface configurations of cubes, the marching voxels method first generates triangles for inner voxels. Then it combines the triangles of inner voxels to produce the surface of an object. Finally, the surface of an object is projected to a plane to form the final image. 
Since the marching voxels method considers the combination of triangles of voxels not cubes and the combination of triangles is performed in a deterministic way, there is neither ambiguous case of a combination nor holes for the generated surface. The experimental results show that the marching voxels method saves about 30% of the surface rendering time compared to the marching cubes method for test samples.", "fno": "10070306", "keywords": [ "Surface Rendering", "Surface Extraction", "Marching Voxels Method", "Marching Cubes Method" ], "authors": [ { "affiliation": "Feng Chia University", "fullName": "Chin-Feng Lin", "givenName": "Chin-Feng", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "Feng Chia University", "fullName": "Don-Lin Yang", "givenName": "Don-Lin", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Feng Chia University", "fullName": "Yeh-Ching Chung", "givenName": "Yeh-Ching", "surname": "Chung", "__typename": "ArticleAuthorType" } ], "idPrefix": "cgi", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-07-01T00:00:00", "pubType": "proceedings", "pages": "0306", "year": "2001", "issn": null, "isbn": "0-7695-1007-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "10070299", "articleId": "12OmNqC2v3M", "__typename": "AdjacentArticleType" }, "next": { "fno": "10070317", "articleId": "12OmNBAIAPn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyRg4mf", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "acronym": "icvrv", "groupId": "1800579", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNA14A8N", "doi": "10.1109/ICVRV.2014.22", "title": "Editing Fluid Sequences Using Space Warping Technique", "normalizedTitle": "Editing Fluid Sequences Using Space Warping Technique", "abstract": "In this paper, we introduce a novel algorithm for editing fluid sequences using space warping technique. This technique allows us obtain user-specified flow sequence based on original sequence. Our algorithm analyzes the motion of input fluid sequence, computes the geometry transformation between the flow lines of input sequence and the user-specified flow lines, and then synthesizes the edited sequences. By this way, we can solve the computational cost of simulating fluid sequences. And we can obtain different forms of fluid sequences by editing available continuous flow patterns. We use this technique to edit sequences of smoke sequences for experiment in this paper.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we introduce a novel algorithm for editing fluid sequences using space warping technique. This technique allows us obtain user-specified flow sequence based on original sequence. Our algorithm analyzes the motion of input fluid sequence, computes the geometry transformation between the flow lines of input sequence and the user-specified flow lines, and then synthesizes the edited sequences. By this way, we can solve the computational cost of simulating fluid sequences. And we can obtain different forms of fluid sequences by editing available continuous flow patterns. 
We use this technique to edit sequences of smoke sequences for experiment in this paper.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we introduce a novel algorithm for editing fluid sequences using space warping technique. This technique allows us obtain user-specified flow sequence based on original sequence. Our algorithm analyzes the motion of input fluid sequence, computes the geometry transformation between the flow lines of input sequence and the user-specified flow lines, and then synthesizes the edited sequences. By this way, we can solve the computational cost of simulating fluid sequences. And we can obtain different forms of fluid sequences by editing available continuous flow patterns. We use this technique to edit sequences of smoke sequences for experiment in this paper.", "fno": "6854a403", "keywords": [ "Fluids", "Computational Modeling", "Animation", "Computational Efficiency", "Mathematical Model", "Computer Graphics", "Adaptation Models", "Stream Lines", "Motion Editing", "Fluid Sequence", "Warping" ], "authors": [ { "affiliation": null, "fullName": "Bosheng Zhou", "givenName": "Bosheng", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qing Zuo", "givenName": "Qing", "surname": "Zuo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yue Qi", "givenName": "Yue", "surname": "Qi", "__typename": "ArticleAuthorType" } ], "idPrefix": "icvrv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-08-01T00:00:00", "pubType": "proceedings", "pages": "403-407", "year": "2014", "issn": null, "isbn": "978-1-4799-6854-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6854a396", "articleId": "12OmNzUgcYC", "__typename": "AdjacentArticleType" }, "next": { "fno": "6854a408", "articleId": "12OmNBU1jL7", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/comwor/1988/0810/0/00004811", "title": "3-D graphics applications in fluid flow simulations", "doi": null, "abstractUrl": "/proceedings-article/comwor/1988/00004811/12OmNBQ2W24", "parentPublication": { "id": "proceedings/comwor/1988/0810/0", "title": "1988 Proceedings. 2nd IEEE Conference on Computer Workstations", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1996/3673/0/36730249", "title": "UFLOW: Visualizing Uncertainty in Fluid Flow", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1996/36730249/12OmNs59JIG", "parentPublication": { "id": "proceedings/ieee-vis/1996/3673/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvmp/2011/4621/0/4621a148", "title": "Space-time Editing of 3D Video Sequences", "doi": null, "abstractUrl": "/proceedings-article/cvmp/2011/4621a148/12OmNzGDsMm", "parentPublication": { "id": "proceedings/cvmp/2011/4621/0", "title": "2011 Conference for Visual Media Production", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446595", "title": "Fluid Sketching&#x2015;Immersive Sketching Based on Fluid Flow", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446595/13bd1eOELL3", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/07/ttp2009071278", "title": "A Stochastic Filtering Technique for Fluid Flow Velocity Fields Tracking", "doi": null, "abstractUrl": "/journal/tp/2009/07/ttp2009071278/13rRUIJuxqL", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine 
Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020178", "title": "View-Dependent Multiscale Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020178/13rRUxAAST8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050682", "title": "Creature Control in a Fluid Environment", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050682/13rRUxZRbnZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/06/09707648", "title": "Impulse Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2023/06/09707648/1APlBguxdSw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d657", "title": "Controllable Animation of Fluid Elements in Still Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d657/1H0KVYPJTvq", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a472", "title": "3D Fluid Volume Editing based on a Bidirectional Time Coupling Optimization Approach", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a472/1tnXuD7vyIU", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzcxYVJ", "title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)", "acronym": "icis", "groupId": "1001200", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNAsBFO1", "doi": "10.1109/ICIS.2017.7960052", "title": "Contained fluid simulation based on game engine", "normalizedTitle": "Contained fluid simulation based on game engine", "abstract": "Ordinary Fluid Simulation focuses on the effects of the fluid surface, like seas, rivers and lakes, which is massive and unshakable. Few researches have devoted to simulate the phenomenon of fluid in containers, especially the behaviors of contained fluid being moved or tilted. This paper concentrates on dynamic simulation of fluid in containers. Based on Unity3D and Smoothed Particle Hydrodynamics (SPH), we proposes an interactive application, which demonstrates how to manipulate fluid in containers, such as shaking a cup of water, tilting and pouring water from one cup to another.", "abstracts": [ { "abstractType": "Regular", "content": "Ordinary Fluid Simulation focuses on the effects of the fluid surface, like seas, rivers and lakes, which is massive and unshakable. Few researches have devoted to simulate the phenomenon of fluid in containers, especially the behaviors of contained fluid being moved or tilted. This paper concentrates on dynamic simulation of fluid in containers. Based on Unity3D and Smoothed Particle Hydrodynamics (SPH), we proposes an interactive application, which demonstrates how to manipulate fluid in containers, such as shaking a cup of water, tilting and pouring water from one cup to another.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Ordinary Fluid Simulation focuses on the effects of the fluid surface, like seas, rivers and lakes, which is massive and unshakable. 
Few researches have devoted to simulate the phenomenon of fluid in containers, especially the behaviors of contained fluid being moved or tilted. This paper concentrates on dynamic simulation of fluid in containers. Based on Unity3D and Smoothed Particle Hydrodynamics (SPH), we proposes an interactive application, which demonstrates how to manipulate fluid in containers, such as shaking a cup of water, tilting and pouring water from one cup to another.", "fno": "07960052", "keywords": [ "Fluids", "Containers", "Solid Modeling", "Rendering Computer Graphics", "Kernel", "Games", "Computational Modeling", "Fluid Simulation", "SPH Algorithm", "Unity SD", "Particle System", "Shader" ], "authors": [ { "affiliation": "School of Computer Science, Communication University of China, Beijing, China", "fullName": "Wenfeng Hu", "givenName": "Wenfeng", "surname": "Hu", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science, Communication University of China, Beijing, China", "fullName": "Zhe Wang", "givenName": "Zhe", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science, Communication University of China, Beijing, China", "fullName": "Xin Fan", "givenName": "Xin", "surname": "Fan", "__typename": "ArticleAuthorType" } ], "idPrefix": "icis", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2017-05-01T00:00:00", "pubType": "proceedings", "pages": "545-549", "year": "2017", "issn": null, "isbn": "978-1-5090-5507-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07960051", "articleId": "12OmNB06l56", "__typename": "AdjacentArticleType" }, "next": { "fno": "07960053", "articleId": "12OmNCgJe6h", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/percomw/2010/6605/0/05470653", "title": "Towards wearable 
sensing-based assessment of fluid intake", "doi": null, "abstractUrl": "/proceedings-article/percomw/2010/05470653/12OmNBr4erZ", "parentPublication": { "id": "proceedings/percomw/2010/6605/0", "title": "2010 8th IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2013/5001/0/06655759", "title": "A Comparative Analysis of Fluid Simulation Methods Based on SPH", "doi": null, "abstractUrl": "/proceedings-article/svr/2013/06655759/12OmNqBtiNf", "parentPublication": { "id": "proceedings/svr/2013/5001/0", "title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2015/7673/0/7673a269", "title": "A Shape-Maintained and Low-Dissipation Fluid Guiding Pipeline", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2015/7673a269/12OmNwE9Ou6", "parentPublication": { "id": "proceedings/icvrv/2015/7673/0", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b889", "title": "See the Glass Half Full: Reasoning About Liquid Containers, Their Volume and Content", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b889/12OmNxy4MYd", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a164", "title": "SPH-Based Fluid Simulation: A Survey", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a164/12OmNyKa6dj", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and 
Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446595", "title": "Fluid Sketching&#x2015;Immersive Sketching Based on Fluid Flow", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446595/13bd1eOELL3", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020178", "title": "View-Dependent Multiscale Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020178/13rRUxAAST8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcice/2022/6067/0/606700a037", "title": "A Volume of Fluid Method for Simulation of Reservoir Area Landslide Surge Under Quasi-fluid Assumption", "doi": null, "abstractUrl": "/proceedings-article/jcice/2022/606700a037/1H0KkHIzDBC", "parentPublication": { "id": "proceedings/jcice/2022/6067/0", "title": "2022 International Joint Conference on Information and Communication Engineering (JCICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdiime/2022/9009/0/900900a048", "title": "Solid-Fluid Interaction Simulation System Based on SPH Unified Particle Framework", "doi": null, "abstractUrl": "/proceedings-article/icdiime/2022/900900a048/1Iz56eSpj3y", "parentPublication": { "id": "proceedings/icdiime/2022/9009/0", "title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a045", "title": "A Controllable Spring Force Based Method for Fluid Surface Disturbance 
Details Simulation", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a045/1uGXV1Qs8IU", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVn", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "acronym": "icvrv", "groupId": "1800579", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNwE9Ou6", "doi": "10.1109/ICVRV.2015.26", "title": "A Shape-Maintained and Low-Dissipation Fluid Guiding Pipeline", "normalizedTitle": "A Shape-Maintained and Low-Dissipation Fluid Guiding Pipeline", "abstract": "Fluid guiding methods could generate high-resolution simulation results from basic low-resolution simulation results, rather than direct simulation. However, because of dissipation, the high-resolution results always leads to huge shape differences from low-resolution inputs. Previous fluid guiding methods often use complex fluid control methods or artificial force to reduce shape differences. Such approaches need to change the existing fluid simulation method as input. In contrast, we propose a novel fluid guiding pipeline by two basic groups of low-resolution simulation results. It takes balance between physical accuracy and fluid shape, without changing the existing generation method. In addition, we proposed an objective shape similarity evaluation criterion based on finite-time Lyapunov exponent, in order to determine the similarity between fluid shapes. Experimental results demonstrated that our pipeline could quickly generate shape-maintained and low-dissipation high-resolution simulation results. It is much easier to implement than previous fluid guiding methods.", "abstracts": [ { "abstractType": "Regular", "content": "Fluid guiding methods could generate high-resolution simulation results from basic low-resolution simulation results, rather than direct simulation. However, because of dissipation, the high-resolution results always leads to huge shape differences from low-resolution inputs. 
Previous fluid guiding methods often use complex fluid control methods or artificial force to reduce shape differences. Such approaches need to change the existing fluid simulation method as input. In contrast, we propose a novel fluid guiding pipeline by two basic groups of low-resolution simulation results. It takes balance between physical accuracy and fluid shape, without changing the existing generation method. In addition, we proposed an objective shape similarity evaluation criterion based on finite-time Lyapunov exponent, in order to determine the similarity between fluid shapes. Experimental results demonstrated that our pipeline could quickly generate shape-maintained and low-dissipation high-resolution simulation results. It is much easier to implement than previous fluid guiding methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Fluid guiding methods could generate high-resolution simulation results from basic low-resolution simulation results, rather than direct simulation. However, because of dissipation, the high-resolution results always leads to huge shape differences from low-resolution inputs. Previous fluid guiding methods often use complex fluid control methods or artificial force to reduce shape differences. Such approaches need to change the existing fluid simulation method as input. In contrast, we propose a novel fluid guiding pipeline by two basic groups of low-resolution simulation results. It takes balance between physical accuracy and fluid shape, without changing the existing generation method. In addition, we proposed an objective shape similarity evaluation criterion based on finite-time Lyapunov exponent, in order to determine the similarity between fluid shapes. Experimental results demonstrated that our pipeline could quickly generate shape-maintained and low-dissipation high-resolution simulation results. 
It is much easier to implement than previous fluid guiding methods.", "fno": "7673a269", "keywords": [ "Fluids", "Shape", "Pipelines", "Computational Modeling", "Simulation", "Extrapolation", "Interpolation", "Shape Similarity Evaluation", "Fluid Simulation", "Fluid Guiding", "Shape Maintained", "Low Dissipation" ], "authors": [ { "affiliation": null, "fullName": "Cheng Yang", "givenName": "Cheng", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xubo Yang", "givenName": "Xubo", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhenyi He", "givenName": "Zhenyi", "surname": "He", "__typename": "ArticleAuthorType" } ], "idPrefix": "icvrv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-10-01T00:00:00", "pubType": "proceedings", "pages": "269-276", "year": "2015", "issn": null, "isbn": "978-1-4673-7673-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7673a264", "articleId": "12OmNyPQ4SW", "__typename": "AdjacentArticleType" }, "next": { "fno": "7673a277", "articleId": "12OmNzT7OvQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cmpeur/1993/4030/0/00289872", "title": "Design by interpolation", "doi": null, "abstractUrl": "/proceedings-article/cmpeur/1993/00289872/12OmNAlvHXR", "parentPublication": { "id": "proceedings/cmpeur/1993/4030/0", "title": "Proceedings of COMPEURO '93", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2017/5507/0/07960052", "title": "Contained fluid simulation based on game engine", "doi": null, "abstractUrl": "/proceedings-article/icis/2017/07960052/12OmNAsBFO1", "parentPublication": { "id": "proceedings/icis/2017/5507/0", "title": "2017 IEEE/ACIS 16th International Conference on Computer 
and Information Science (ICIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtss/2014/7288/0/7288a041", "title": "MC-Fluid: Fluid Model-Based Mixed-Criticality Scheduling on Multiprocessors", "doi": null, "abstractUrl": "/proceedings-article/rtss/2014/7288a041/12OmNBp52I2", "parentPublication": { "id": "proceedings/rtss/2014/7288/0", "title": "2014 IEEE Real-Time Systems Symposium (RTSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/peits/2008/3342/0/3342a485", "title": "Modeling of Urban Traffic System Based on Dynamic Stochastic Fluid Petri Net", "doi": null, "abstractUrl": "/proceedings-article/peits/2008/3342a485/12OmNButq6F", "parentPublication": { "id": "proceedings/peits/2008/3342/0", "title": "2008 Workshop on Power Electronics and Intelligent Transportation System", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icie/2010/4080/4/05572732", "title": "Fluid Temperature and Power Estimation of Geothermal Power Plants by a Simplified Numerical Model", "doi": null, "abstractUrl": "/proceedings-article/icie/2010/05572732/12OmNqFa5pD", "parentPublication": { "id": "proceedings/icie/2010/4080/3", "title": "Information Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/2/3583b267", "title": "Numerical Simulation of Fluid Flow in Thin Slab Continuous Casting Mould", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b267/12OmNvjgWVh", "parentPublication": { "id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446595", "title": "Fluid Sketching&#x2015;Immersive Sketching Based on Fluid 
Flow", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446595/13bd1eOELL3", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/01/mcg2015010074", "title": "Toward Better Surface Tracking for Fluid Simulation", "doi": null, "abstractUrl": "/magazine/cg/2015/01/mcg2015010074/13rRUILLkG7", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020178", "title": "View-Dependent Multiscale Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020178/13rRUxAAST8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10015628", "title": "Full-Volume 3D Fluid Flow Reconstruction With Light Field PIV", "doi": null, "abstractUrl": "/journal/tp/5555/01/10015628/1JR6d0EQ2o8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx5GU2m", "title": "2010 Brazilian Symposium on Games and Digital Entertainment", "acronym": "sbgames", "groupId": "1800056", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNyQYtk2", "doi": "10.1109/SBGAMES.2010.25", "title": "Fluid Simulation with Two-Way Interaction Rigid Body Using a Heterogeneous GPU and CPU Environment", "normalizedTitle": "Fluid Simulation with Two-Way Interaction Rigid Body Using a Heterogeneous GPU and CPU Environment", "abstract": "Simulation of natural phenomena, such as water and smoke, is a very important topic to increase real time scene realism in video-games. Besides the graphical aspect, in order to achieve realism, it is necessary to correctly simulate and solve its complex governing equations, requiring an intense computational work.Fluid simulation is achieved by solving the Navier-Stokes set of equations, using a numerical method in CPU or GPU, independently, as these equations do not have an analytical solution. The real time simulacraon also requires the simulation of interaction of the particles with objects in the scene, requiring many collision and contact forces calculation, which may drastically increase the computational time. In this paper we propose an heterogeneous multicore CPU and GPU hybrid architecture for fluid simulation with two-ways of interaction between them, and with a fine granularity control over rigid body's shape collision. We also show the impact of this heterogeneous architecture over GPU and CPU bounded simulations, which is commonly used for this kind of application. 
The heterogeneous architecture developed in this work is developed to best fit the Single Instruction Multiple Thread (SIMT) model used by GPUs in all simulation stages, allowing a high level performance increase.", "abstracts": [ { "abstractType": "Regular", "content": "Simulation of natural phenomena, such as water and smoke, is a very important topic to increase real time scene realism in video-games. Besides the graphical aspect, in order to achieve realism, it is necessary to correctly simulate and solve its complex governing equations, requiring an intense computational work.Fluid simulation is achieved by solving the Navier-Stokes set of equations, using a numerical method in CPU or GPU, independently, as these equations do not have an analytical solution. The real time simulacraon also requires the simulation of interaction of the particles with objects in the scene, requiring many collision and contact forces calculation, which may drastically increase the computational time. In this paper we propose an heterogeneous multicore CPU and GPU hybrid architecture for fluid simulation with two-ways of interaction between them, and with a fine granularity control over rigid body's shape collision. We also show the impact of this heterogeneous architecture over GPU and CPU bounded simulations, which is commonly used for this kind of application. The heterogeneous architecture developed in this work is developed to best fit the Single Instruction Multiple Thread (SIMT) model used by GPUs in all simulation stages, allowing a high level performance increase.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Simulation of natural phenomena, such as water and smoke, is a very important topic to increase real time scene realism in video-games. 
Besides the graphical aspect, in order to achieve realism, it is necessary to correctly simulate and solve its complex governing equations, requiring an intense computational work.Fluid simulation is achieved by solving the Navier-Stokes set of equations, using a numerical method in CPU or GPU, independently, as these equations do not have an analytical solution. The real time simulacraon also requires the simulation of interaction of the particles with objects in the scene, requiring many collision and contact forces calculation, which may drastically increase the computational time. In this paper we propose an heterogeneous multicore CPU and GPU hybrid architecture for fluid simulation with two-ways of interaction between them, and with a fine granularity control over rigid body's shape collision. We also show the impact of this heterogeneous architecture over GPU and CPU bounded simulations, which is commonly used for this kind of application. The heterogeneous architecture developed in this work is developed to best fit the Single Instruction Multiple Thread (SIMT) model used by GPUs in all simulation stages, allowing a high level performance increase.", "fno": "4359a156", "keywords": [ "Smoothed Particle Hydrodynamics", "Fluids", "CUDA", "GPU Aceleration" ], "authors": [ { "affiliation": null, "fullName": "José Ricardo da S. Junior", "givenName": "José Ricardo da S.", "surname": "Junior", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Esteban W. Clua", "givenName": "Esteban W.", "surname": "Clua", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anselmo Montenegro", "givenName": "Anselmo", "surname": "Montenegro", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Paulo A. 
Pagliosa", "givenName": "Paulo A.", "surname": "Pagliosa", "__typename": "ArticleAuthorType" } ], "idPrefix": "sbgames", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-11-01T00:00:00", "pubType": "proceedings", "pages": "156-164", "year": "2010", "issn": null, "isbn": "978-0-7695-4359-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4359a146", "articleId": "12OmNwF0BS5", "__typename": "AdjacentArticleType" }, "next": { "fno": "4359a165", "articleId": "12OmNyFU6XP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccis/2012/4789/0/4789a958", "title": "Communication and Memory Access Latency Characteristics of CPU/GPU Heterogeneous Cluster", "doi": null, "abstractUrl": "/proceedings-article/iccis/2012/4789a958/12OmNAOKnVw", "parentPublication": { "id": "proceedings/iccis/2012/4789/0", "title": "2012 Fourth International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/incos/2011/4579/0/4579a209", "title": "An OpenMP Compiler for Hybrid CPU/GPU Computing Architecture", "doi": null, "abstractUrl": "/proceedings-article/incos/2011/4579a209/12OmNARRYsz", "parentPublication": { "id": "proceedings/incos/2011/4579/0", "title": "Intelligent Networking and Collaborative Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sccompanion/2012/4956/0/4956a166", "title": "An Analysis of a Distributed GPU Implementation of Proton Computed Tomographic (pCT) Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/sccompanion/2012/4956a166/12OmNApLGLc", "parentPublication": { "id": "proceedings/sccompanion/2012/4956/0", "title": "2012 SC Companion: High Performance 
Computing, Networking Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpp/2009/3802/0/3802a550", "title": "Accelerating Lattice Boltzmann Fluid Flow Simulations Using Graphics Processors", "doi": null, "abstractUrl": "/proceedings-article/icpp/2009/3802a550/12OmNB1wkOA", "parentPublication": { "id": "proceedings/icpp/2009/3802/0", "title": "2009 International Conference on Parallel Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2009/3836/2/3836b254", "title": "GPU Acceleration of High-Speed Collision Molecular Dynamics Simulation", "doi": null, "abstractUrl": "/proceedings-article/cit/2009/3836b254/12OmNBSBk4o", "parentPublication": { "id": "proceedings/cit/2009/3836/2", "title": "Computer and Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pads/2011/1363/0/05936750", "title": "Two-Way Real Time Fluid Simulation Using a Heterogeneous Multicore CPU and GPU Architecture", "doi": null, "abstractUrl": "/proceedings-article/pads/2011/05936750/12OmNBd9T1o", "parentPublication": { "id": "proceedings/pads/2011/1363/0", "title": "2011 IEEE Workshop on Principles of Advanced and Distributed Simulation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifcsta/2009/3930/1/3930a259", "title": "CUDA-Based Jacobi's Iterative Method", "doi": null, "abstractUrl": "/proceedings-article/ifcsta/2009/3930a259/12OmNBhpRZc", "parentPublication": { "id": "proceedings/ifcsta/2009/3930/3", "title": "Computer Science-Technology and Applications, International Forum on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2010/3999/0/3999a118", "title": "A CUDA-Based Implementation of Stable Fluids in 3D with Internal and Moving Boundaries", 
"doi": null, "abstractUrl": "/proceedings-article/iccsa/2010/3999a118/12OmNvnwVim", "parentPublication": { "id": "proceedings/iccsa/2010/3999/0", "title": "2010 International Conference on Computational Science and Its Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccms/2010/3941/1/3941a290", "title": "A Particle-Based Unified Model for Non-Newtonian Fluid Simulation", "doi": null, "abstractUrl": "/proceedings-article/iccms/2010/3941a290/12OmNzzfToR", "parentPublication": { "id": "proceedings/iccms/2010/3941/3", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdiime/2022/9009/0/900900a048", "title": "Solid-Fluid Interaction Simulation System Based on SPH Unified Particle Framework", "doi": null, "abstractUrl": "/proceedings-article/icdiime/2022/900900a048/1Iz56eSpj3y", "parentPublication": { "id": "proceedings/icdiime/2022/9009/0", "title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0KVYPJTvq", "doi": "10.1109/CVPR52688.2022.00365", "title": "Controllable Animation of Fluid Elements in Still Images", "normalizedTitle": "Controllable Animation of Fluid Elements in Still Images", "abstract": "We propose a method to interactively control the animation of fluid elements in still images to generate cinemagraphs. Specifically, we focus on the animation of fluid elements like water, smoke, fire, which have the properties of repeating textures and continuous fluid motion. Taking inspiration from prior works, we represent the motion of such fluid elements in the image in the form of a constant 2D optical flow map. To this end, we allow the user to provide any number of arrow directions and their associated speeds along with a mask of the regions the user wants to animate. The user-provided input arrow directions, their corresponding speed values, and the mask are then converted into a dense flow map representing a constant optical flow map (F<inf>D</inf>). We observe that F<inf>D</inf>, obtained using simple exponential operations can closely approximate the plausible motion of elements in the image. We further refine computed dense optical flow map F<inf>D</inf> using a generative-adversarial network (GAN) to obtain a more realistic flow map. We devise a novel UNet based architecture to autoregressively generate future frames using the refined optical flow map by forward-warping the input image features at different resolutions. We conduct extensive experiments on a publicly available dataset and show that our method is superior to the baselines in terms of qualitative and quantitative metrics. 
In addition, we show the qualitative animations of the objects in directions that did not exist in the training set and provide a way to synthesize videos that otherwise would not exist in the real world. Project url: https://controllable-cinemagraphs.github.io/", "abstracts": [ { "abstractType": "Regular", "content": "We propose a method to interactively control the animation of fluid elements in still images to generate cinemagraphs. Specifically, we focus on the animation of fluid elements like water, smoke, fire, which have the properties of repeating textures and continuous fluid motion. Taking inspiration from prior works, we represent the motion of such fluid elements in the image in the form of a constant 2D optical flow map. To this end, we allow the user to provide any number of arrow directions and their associated speeds along with a mask of the regions the user wants to animate. The user-provided input arrow directions, their corresponding speed values, and the mask are then converted into a dense flow map representing a constant optical flow map (F<inf>D</inf>). We observe that F<inf>D</inf>, obtained using simple exponential operations can closely approximate the plausible motion of elements in the image. We further refine computed dense optical flow map F<inf>D</inf> using a generative-adversarial network (GAN) to obtain a more realistic flow map. We devise a novel UNet based architecture to autoregressively generate future frames using the refined optical flow map by forward-warping the input image features at different resolutions. We conduct extensive experiments on a publicly available dataset and show that our method is superior to the baselines in terms of qualitative and quantitative metrics. In addition, we show the qualitative animations of the objects in directions that did not exist in the training set and provide a way to synthesize videos that otherwise would not exist in the real world. 
Project url: https://controllable-cinemagraphs.github.io/", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a method to interactively control the animation of fluid elements in still images to generate cinemagraphs. Specifically, we focus on the animation of fluid elements like water, smoke, fire, which have the properties of repeating textures and continuous fluid motion. Taking inspiration from prior works, we represent the motion of such fluid elements in the image in the form of a constant 2D optical flow map. To this end, we allow the user to provide any number of arrow directions and their associated speeds along with a mask of the regions the user wants to animate. The user-provided input arrow directions, their corresponding speed values, and the mask are then converted into a dense flow map representing a constant optical flow map (FD). We observe that FD, obtained using simple exponential operations can closely approximate the plausible motion of elements in the image. We further refine computed dense optical flow map FD using a generative-adversarial network (GAN) to obtain a more realistic flow map. We devise a novel UNet based architecture to autoregressively generate future frames using the refined optical flow map by forward-warping the input image features at different resolutions. We conduct extensive experiments on a publicly available dataset and show that our method is superior to the baselines in terms of qualitative and quantitative metrics. In addition, we show the qualitative animations of the objects in directions that did not exist in the training set and provide a way to synthesize videos that otherwise would not exist in the real world. 
Project url: https://controllable-cinemagraphs.github.io/", "fno": "694600d657", "keywords": [ "Computer Animation", "Image Motion Analysis", "Image Sequences", "Image Texture", "Video Signal Processing", "Controllable Animation", "Fluid Elements", "Continuous Fluid Motion", "Constant 2 D Optical Flow Map", "User Provided Input Arrow Directions", "Dense Flow Map", "Constant Optical Flow Map", "Computed Dense Optical Flow Map FD", "Realistic Flow Map", "Refined Optical Flow Map", "Input Image", "Qualitative Animations", "Water", "Training", "Measurement", "Computer Vision", "Image Motion Analysis", "Fluids", "Animation" ], "authors": [ { "affiliation": "Adobe Research India", "fullName": "Aniruddha Mahapatra", "givenName": "Aniruddha", "surname": "Mahapatra", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research India", "fullName": "Kuldeep Kulkarni", "givenName": "Kuldeep", "surname": "Kulkarni", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "3657-3666", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "694600d647", "articleId": "1H0LgsuDUHe", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600d667", "articleId": "1H0NhlyUpl6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icvrv/2014/6854/0/6854a403", "title": "Editing Fluid Sequences Using Space Warping Technique", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a403/12OmNA14A8N", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cgi/1997/7825/0/78250178", "title": "Controlling Fluid Animation", "doi": null, "abstractUrl": "/proceedings-article/cgi/1997/78250178/12OmNwDj19J", "parentPublication": { "id": "proceedings/cgi/1997/7825/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a164", "title": "SPH-Based Fluid Simulation: A Survey", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a164/12OmNyKa6dj", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2011/4648/0/4648a148", "title": "Fluid Animation on Arbitrarily-Shaped Structured Grids", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2011/4648a148/12OmNzb7Zrb", "parentPublication": { "id": "proceedings/sbgames/2011/4648/0", "title": "2011 Brazilian Symposium on Games and Digital Entertainment", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446595", "title": "Fluid Sketching&#x2015;Immersive Sketching Based on Fluid Flow", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446595/13bd1eOELL3", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020178", "title": "View-Dependent Multiscale Fluid Simulation", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020178/13rRUxAAST8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tg/2011/05/ttg2011050682", "title": "Creature Control in a Fluid Environment", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050682/13rRUxZRbnZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10015628", "title": "Full-Volume 3D Fluid Flow Reconstruction With Light Field PIV", "doi": null, "abstractUrl": "/journal/tp/5555/01/10015628/1JR6d0EQ2o8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a045", "title": "A Controllable Spring Force Based Method for Fluid Surface Disturbance Details Simulation", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a045/1uGXV1Qs8IU", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900f534", "title": "EffiScene: Efficient Per-Pixel Rigidity Inference for Unsupervised Joint Learning of Optical Flow, Depth, Camera Pose and Motion Segmentation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900f534/1yeKJgdoKRy", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1zw5CYExBa8", "title": "2021 Ninth International Symposium on Computing and Networking Workshops (CANDARW)", "acronym": "candarw", "groupId": "1829704", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1zw5EbMhuwM", "doi": "10.1109/CANDARW53999.2021.00085", "title": "Real-Time Execution based on Fluid Scheduling by using IPC Control Scheme", "normalizedTitle": "Real-Time Execution based on Fluid Scheduling by using IPC Control Scheme", "abstract": "Fluid scheduling is an optimal real-time scheduling applicable to multiprocessor systems and is a scheduling model so that every real-time task is executed at a constant speed from release time to deadline. It is necessary for fluid scheduling to control the execution speed of each task. But, since the execution speed of the task is invariable in a conventional general purpose processor. Therefore, fluid scheduling is conventionally achieved by repeatedly executing and stopping tasks. However, this method incurs significant overheads due to frequent task switching. On the other hand, fluid scheduling without overhead by using an IPC control scheme has been proposed. The IPC control scheme controls the execution speed of each thread in an SMT processor. We propose a new IPC control scheme to extract thread-level parallelism and improve throughput effectively, also improving the schedulability of the fluid schedule. The evaluation results show that the proposed method improved the total throughput of the fluid schedule.", "abstracts": [ { "abstractType": "Regular", "content": "Fluid scheduling is an optimal real-time scheduling applicable to multiprocessor systems and is a scheduling model so that every real-time task is executed at a constant speed from release time to deadline. It is necessary for fluid scheduling to control the execution speed of each task. 
But, since the execution speed of the task is invariable in a conventional general purpose processor. Therefore, fluid scheduling is conventionally achieved by repeatedly executing and stopping tasks. However, this method incurs significant overheads due to frequent task switching. On the other hand, fluid scheduling without overhead by using an IPC control scheme has been proposed. The IPC control scheme controls the execution speed of each thread in an SMT processor. We propose a new IPC control scheme to extract thread-level parallelism and improve throughput effectively, also improving the schedulability of the fluid schedule. The evaluation results show that the proposed method improved the total throughput of the fluid schedule.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Fluid scheduling is an optimal real-time scheduling applicable to multiprocessor systems and is a scheduling model so that every real-time task is executed at a constant speed from release time to deadline. It is necessary for fluid scheduling to control the execution speed of each task. But, since the execution speed of the task is invariable in a conventional general purpose processor. Therefore, fluid scheduling is conventionally achieved by repeatedly executing and stopping tasks. However, this method incurs significant overheads due to frequent task switching. On the other hand, fluid scheduling without overhead by using an IPC control scheme has been proposed. The IPC control scheme controls the execution speed of each thread in an SMT processor. We propose a new IPC control scheme to extract thread-level parallelism and improve throughput effectively, also improving the schedulability of the fluid schedule. 
The evaluation results show that the proposed method improved the total throughput of the fluid schedule.", "fno": "283500a459", "keywords": [ "Multiprocessing Systems", "Multi Threading", "Processor Scheduling", "Real Time Systems", "Fluid Scheduling", "IPC Control Scheme", "Real Time Scheduling", "Real Time Task", "Execution Speed", "Real Time Execution", "Frequent Task Switching", "SMT Processor", "Thread Level Parallelism", "General Purpose Processor", "Schedules", "Fluids", "Processor Scheduling", "Instruction Sets", "Conferences", "Process Control", "Switches", "Real Time Scheduling", "Fluid Scheduling", "IPC Control", "SMT Processor" ], "authors": [ { "affiliation": "Keio University,Graduate School of Science and Technology,Kouhoku-ku, Yokohama-shi,Kanagawa,Japan,223-8852", "fullName": "Atsushi Santo", "givenName": "Atsushi", "surname": "Santo", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University,Graduate School of Science and Technology,Kouhoku-ku, Yokohama-shi,Kanagawa,Japan,223-8852", "fullName": "Nobuyuki Yamasaki", "givenName": "Nobuyuki", "surname": "Yamasaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "candarw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-11-01T00:00:00", "pubType": "proceedings", "pages": "459-463", "year": "2021", "issn": null, "isbn": "978-1-6654-2835-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "283500a454", "articleId": "1zw5Eq5jY0o", "__typename": "AdjacentArticleType" }, "next": { "fno": "283500a464", "articleId": "1zw5M6pmCME", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rtss/2014/7288/0/7288a041", "title": "MC-Fluid: Fluid Model-Based Mixed-Criticality Scheduling on Multiprocessors", "doi": null, "abstractUrl": "/proceedings-article/rtss/2014/7288a041/12OmNBp52I2", 
"parentPublication": { "id": "proceedings/rtss/2014/7288/0", "title": "2014 IEEE Real-Time Systems Symposium (RTSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtss/2015/9507/0/9507a327", "title": "MC-Fluid: Simplified and Optimally Quantified", "doi": null, "abstractUrl": "/proceedings-article/rtss/2015/9507a327/12OmNC943PT", "parentPublication": { "id": "proceedings/rtss/2015/9507/0", "title": "2015 IEEE Real-Time Systems Symposium (RTSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pact/2014/2809/0/07855897", "title": "Warp-aware trace scheduling for GPUs", "doi": null, "abstractUrl": "/proceedings-article/pact/2014/07855897/12OmNxecSa1", "parentPublication": { "id": "proceedings/pact/2014/2809/0", "title": "2014 23rd International Conference on Parallel Architecture and Compilation (PACT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtcsa/2014/3953/0/06910497", "title": "Partitioned multiprocessor scheduling of mixed-criticality parallel jobs", "doi": null, "abstractUrl": "/proceedings-article/rtcsa/2014/06910497/12OmNzYNNdp", "parentPublication": { "id": "proceedings/rtcsa/2014/3953/0", "title": "2014 IEEE 20th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2018/07/08259245", "title": "System-Wide Time versus Density Tradeoff in Real-Time Multicore Fluid Scheduling", "doi": null, "abstractUrl": "/journal/tc/2018/07/08259245/13rRUyuvRwI", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2018/04/08059775", "title": "MC-Fluid: Multi-Core Fluid-Based Mixed-Criticality Scheduling", "doi": null, "abstractUrl": 
"/journal/tc/2018/04/08059775/13rRUzpzeAk", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2022/0883/0/088300c182", "title": "Fluid: Dataset Abstraction and Elastic Acceleration for Cloud-native Deep Learning Training Jobs", "doi": null, "abstractUrl": "/proceedings-article/icde/2022/088300c182/1FwFoGfzlU4", "parentPublication": { "id": "proceedings/icde/2022/0883/0", "title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2019/2583/0/258300a402", "title": "Implementation of Fluid Scheduling Using IPC Control Mechanism", "doi": null, "abstractUrl": "/proceedings-article/icpads/2019/258300a402/1h5WowrL5fi", "parentPublication": { "id": "proceedings/icpads/2019/2583/0", "title": "2019 IEEE 25th International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtas/2020/5499/0/09113106", "title": "Bringing Inter-Thread Cache Benefits to Federated Scheduling", "doi": null, "abstractUrl": "/proceedings-article/rtas/2020/09113106/1ky1g2jZgxW", "parentPublication": { "id": "proceedings/rtas/2020/5499/0", "title": "2020 IEEE Real-Time and Embedded Technology and Applications Symposium (RTAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rtcsa/2021/4188/0/418800a103", "title": "Reserving Processors by Precise Scheduling of Mixed-Criticality Tasks", "doi": null, "abstractUrl": "/proceedings-article/rtcsa/2021/418800a103/1xeWQbfCrpC", "parentPublication": { "id": "proceedings/rtcsa/2021/4188/0", "title": "2021 IEEE 27th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzzxuxr", "title": "4th International Conference on Digital Home (ICDH)", "acronym": "icdh", "groupId": "1802037", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNBigFnh", "doi": "10.1109/ICDH.2012.64", "title": "Calligraphy Beautification Method for Chinese Handwritings", "normalizedTitle": "Calligraphy Beautification Method for Chinese Handwritings", "abstract": "This paper propose a new calligraphy beautification method for online handwritten Chinese character. Handwriting stroke trajectories are input from a mouse or a touch pad. After the computer recognizes this stroke, we retrieve the most similarity standard stroke from a parametric standard stroke library, and then we render the user stroke by the method of learning the parametric information of the standard stroke. Finally, a corresponding Kai style character template is used for adjusting the overall architecture of the user-input character. The result proves that our beautification method can preserve user's personality as well as render the user's handwritings with the Kai style calligraphic effect well.", "abstracts": [ { "abstractType": "Regular", "content": "This paper propose a new calligraphy beautification method for online handwritten Chinese character. Handwriting stroke trajectories are input from a mouse or a touch pad. After the computer recognizes this stroke, we retrieve the most similarity standard stroke from a parametric standard stroke library, and then we render the user stroke by the method of learning the parametric information of the standard stroke. Finally, a corresponding Kai style character template is used for adjusting the overall architecture of the user-input character. 
The result proves that our beautification method can preserve user's personality as well as render the user's handwritings with the Kai style calligraphic effect well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper propose a new calligraphy beautification method for online handwritten Chinese character. Handwriting stroke trajectories are input from a mouse or a touch pad. After the computer recognizes this stroke, we retrieve the most similarity standard stroke from a parametric standard stroke library, and then we render the user stroke by the method of learning the parametric information of the standard stroke. Finally, a corresponding Kai style character template is used for adjusting the overall architecture of the user-input character. The result proves that our beautification method can preserve user's personality as well as render the user's handwritings with the Kai style calligraphic effect well.", "fno": "4899a122", "keywords": [ "Handwritten Chinese Character Beautification", "Stroke Parameterization", "Stroke Recognization" ], "authors": [ { "affiliation": null, "fullName": "Hailong Li", "givenName": "Hailong", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Peng Liu", "givenName": "Peng", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Songhua Xu", "givenName": "Songhua", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shujin Lin", "givenName": "Shujin", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdh", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "122-127", "year": "2012", "issn": null, "isbn": "978-1-4673-1348-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4899a117", 
"articleId": "12OmNxGSmhL", "__typename": "AdjacentArticleType" }, "next": { "fno": "4899a128", "articleId": "12OmNs4S8Bt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdar/2013/4999/0/06628795", "title": "Triangular Mesh Based Stroke Segmentation for Chinese Calligraphy", "doi": null, "abstractUrl": "/proceedings-article/icdar/2013/06628795/12OmNBsLP9w", "parentPublication": { "id": "proceedings/icdar/2013/4999/0", "title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a798", "title": "A Kai Style Calligraphic Beautification Method for Handwriting Chinese Character", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a798/12OmNvjyxC3", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/1995/7128/2/71280689", "title": "The field transform and its application to personal handwritten Chinese character recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/1995/71280689/12OmNx5YvkW", "parentPublication": { "id": "proceedings/icdar/1995/7128/2", "title": "Proceedings of 3rd International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a531", "title": "An Investigation of Imaginary Stroke Techinique for Cursive Online Handwriting Chinese Character Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a531/12OmNxWui8G", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci/2006/0475/1/04216461", "title": "A Novel Stroke Extraction Model for Chinese Characters based on Steerable Filters", "doi": null, "abstractUrl": "/proceedings-article/icci/2006/04216461/12OmNzAohQ3", "parentPublication": { "id": "proceedings/icci/2006/0475/1", "title": "Cognitive Informatics, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccms/2010/3941/1/3941a110", "title": "Semantic-Based Handwritten Chinese Character Recognition Model", "doi": null, "abstractUrl": "/proceedings-article/iccms/2010/3941a110/12OmNzWOBfm", "parentPublication": { "id": "proceedings/iccms/2010/3941/3", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icis/2009/3641/0/3641a683", "title": "Animating the Brush-writing Process of Chinese Calligraphy Characters", "doi": null, "abstractUrl": "/proceedings-article/icis/2009/3641a683/12OmNzhna80", "parentPublication": { "id": "proceedings/icis/2009/3641/0", "title": "Computer and Information Science, ACIS International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1999/10/i1095", "title": "Identification of Fork Points on the Skeletons of Handwritten Chinese Characters", "doi": null, "abstractUrl": "/journal/tp/1999/10/i1095/13rRUILtJs1", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/05/ttp2008050767", "title": "Markov Random Field-Based Statistical Character Structure Modeling for Handwritten Chinese Character Recognition", "doi": null, "abstractUrl": 
"/journal/tp/2008/05/ttp2008050767/13rRUxC0SFb", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050694", "title": "Beautification of Design Sketches Using Trainable Stroke Clustering and Curve Fitting", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050694/13rRUxd2aYX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzUPpz4", "title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)", "acronym": "icassp", "groupId": "1000002", "volume": "2", "displayVolume": "2", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNvA1hqU", "doi": "10.1109/ICASSP.2002.5745031", "title": "User-independent retrieval of free-form hand-drawn sketches", "normalizedTitle": "User-independent retrieval of free-form hand-drawn sketches", "abstract": "In this paper we propose a method to retrieve free-form hand-drawn sketches stored in the form of multiple strokes, by extracting the shape information for each stroke and by considering the geometric relationship between the strokes. To extract the shape information, a number of shape estimators are applied to each stroke to provide a soft decision about how similar it is to a particular shape type. Then, two strokes are matched according to a specific set of features for each shape type. The proximity of the corresponding strokes is used to account for the geometric relationship between multiple strokes during the matching stage. Our approach is robust to different drawing styles, thus making our retrieval system user-independent. Sketch retrieval is useful in applications where a user can easily search through a database of hand-draw sketches by inputting a sketch about what he/she is looking for, without the trouble of describing it using keywords.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we propose a method to retrieve free-form hand-drawn sketches stored in the form of multiple strokes, by extracting the shape information for each stroke and by considering the geometric relationship between the strokes. To extract the shape information, a number of shape estimators are applied to each stroke to provide a soft decision about how similar it is to a particular shape type. 
Then, two strokes are matched according to a specific set of features for each shape type. The proximity of the corresponding strokes is used to account for the geometric relationship between multiple strokes during the matching stage. Our approach is robust to different drawing styles, thus making our retrieval system user-independent. Sketch retrieval is useful in applications where a user can easily search through a database of hand-draw sketches by inputting a sketch about what he/she is looking for, without the trouble of describing it using keywords.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we propose a method to retrieve free-form hand-drawn sketches stored in the form of multiple strokes, by extracting the shape information for each stroke and by considering the geometric relationship between the strokes. To extract the shape information, a number of shape estimators are applied to each stroke to provide a soft decision about how similar it is to a particular shape type. Then, two strokes are matched according to a specific set of features for each shape type. The proximity of the corresponding strokes is used to account for the geometric relationship between multiple strokes during the matching stage. Our approach is robust to different drawing styles, thus making our retrieval system user-independent. 
Sketch retrieval is useful in applications where a user can easily search through a database of hand-draw sketches by inputting a sketch about what he/she is looking for, without the trouble of describing it using keywords.", "fno": "05745031", "keywords": [ "Feature Extraction", "Performance Evaluation", "Board Of Directors", "Data Mining", "Iron", "Data Preprocessing", "Matched Filters" ], "authors": [ { "affiliation": "Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA", "fullName": "Wing Ho Leung", "givenName": "Wing Ho", "surname": "Leung", "__typename": "ArticleAuthorType" }, { "affiliation": "Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA", "fullName": "Tsuhan Chen", "givenName": "Tsuhan", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "icassp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-05-01T00:00:00", "pubType": "proceedings", "pages": "II-2029-II-2032", "year": "2002", "issn": "1520-6149", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05745030", "articleId": "12OmNBZHiia", "__typename": "AdjacentArticleType" }, "next": { "fno": "05745032", "articleId": "12OmNyuyacb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2017/1034/0/1034c280", "title": "An Accurate System for Fashion Hand-Drawn Sketches Vectorization", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034c280/12OmNCyTyn5", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/2/212820642", "title": "Retrieval of On-line Hand-Drawn Sketches", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2004/212820642/12OmNviZlzu", "parentPublication": { "id": "proceedings/icpr/2004/2128/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/1999/0318/0/03180289", "title": "Multimedia Database Retrieval Using Hand-Drawn Sketches", "doi": null, "abstractUrl": "/proceedings-article/icdar/1999/03180289/12OmNwlZu3N", "parentPublication": { "id": "proceedings/icdar/1999/0318/0", "title": "Proceedings of the Fifth International Conference on Document Analysis and Recognition. ICDAR '99 (Cat. No.PR00318)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/03/mcg2017030070", "title": "ColorSketch: A Drawing Assistant for Generating Color Sketches from Photos", "doi": null, "abstractUrl": "/magazine/cg/2017/03/mcg2017030070/13rRUwkfB20", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050694", "title": "Beautification of Design Sketches Using Trainable Stroke Clustering and Curve Fitting", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050694/13rRUxd2aYX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/01/08509167", "title": "Pictionary-Style Word Guessing on Hand-Drawn Object Sketches: Dataset, Analysis and Deep Network Models", "doi": null, "abstractUrl": "/journal/tp/2020/01/08509167/14Fq0W9lXH2", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2019/1764/0/176400a141", "title": 
"Android GUI Search Using Hand-Drawn Sketches", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2019/176400a141/1cJ7nPDMJag", "parentPublication": { "id": "proceedings/icse-companion/2019/1764/0", "title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a069", "title": "Query by Partially-Drawn Sketches for 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a069/1fHkpp4xIJi", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2020/8771/0/09122329", "title": "Manifold Learning for Hand Drawn Sketches", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2020/09122329/1kRSfWCISnm", "parentPublication": { "id": "proceedings/nicoint/2020/8771/0", "title": "2020 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h434", "title": "Learning to Shadow Hand-Drawn Sketches", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h434/1m3o1V04tiM", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzvQI1T", "title": "Proceedings of 3rd International Conference on Document Analysis and Recognition", "acronym": "icdar", "groupId": "1000219", "volume": "1", "displayVolume": "1", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNwDACpr", "doi": "10.1109/ICDAR.1995.599020", "title": "Line extraction and stroke ordering of text pages", "normalizedTitle": "Line extraction and stroke ordering of text pages", "abstract": "A method is developed to extract lines from pages of handwritten text by finding the shortest spanning tree of a graph formed from the set of main strokes. Main strokes of extracted lines are arranged in the same order as they were written by following the path in which they are contained. Then, every secondary stroke is assigned to the closest main stroke. At the end, an ordered list of main strokes each with the corresponding number of assigned secondary strokes is obtained. Each combination of main-secondary strokes can be the input to a subsequent recognition stage. The method is more suited to variable handwriting.", "abstracts": [ { "abstractType": "Regular", "content": "A method is developed to extract lines from pages of handwritten text by finding the shortest spanning tree of a graph formed from the set of main strokes. Main strokes of extracted lines are arranged in the same order as they were written by following the path in which they are contained. Then, every secondary stroke is assigned to the closest main stroke. At the end, an ordered list of main strokes each with the corresponding number of assigned secondary strokes is obtained. Each combination of main-secondary strokes can be the input to a subsequent recognition stage. 
The method is more suited to variable handwriting.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A method is developed to extract lines from pages of handwritten text by finding the shortest spanning tree of a graph formed from the set of main strokes. Main strokes of extracted lines are arranged in the same order as they were written by following the path in which they are contained. Then, every secondary stroke is assigned to the closest main stroke. At the end, an ordered list of main strokes each with the corresponding number of assigned secondary strokes is obtained. Each combination of main-secondary strokes can be the input to a subsequent recognition stage. The method is more suited to variable handwriting.", "fno": "71280390", "keywords": [ "Handwriting Recognition Character Recognition Line Extraction Stroke Ordering Text Pages Handwritten Text Shortest Spanning Tree Closest Main Stroke Ordered List Variable Handwriting" ], "authors": [ { "affiliation": "Dept. of Comput. Eng., King Saud Univ., Riyadh, Saudi Arabia", "fullName": "I.S.I. Abuhaiba", "givenName": "I.S.I.", "surname": "Abuhaiba", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Eng., King Saud Univ., Riyadh, Saudi Arabia", "fullName": "S. Datta", "givenName": "S.", "surname": "Datta", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Eng., King Saud Univ., Riyadh, Saudi Arabia", "fullName": "M.J.J. 
Holt", "givenName": "M.J.J.", "surname": "Holt", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdar", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-08-01T00:00:00", "pubType": "proceedings", "pages": "390", "year": "1995", "issn": null, "isbn": "0-8186-7128-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "71280386", "articleId": "12OmNyPQ4Ps", "__typename": "AdjacentArticleType" }, "next": { "fno": "71280394", "articleId": "12OmNznkKek", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAkWvaO", "title": "Proceedings. Computer Graphics International 2001", "acronym": "cgi", "groupId": "1000132", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNwcUjVd", "doi": "10.1109/CGI.2001.934665", "title": "Creating Pen-and-Ink Illustration Using Stroke Morphing Method", "normalizedTitle": "Creating Pen-and-Ink Illustration Using Stroke Morphing Method", "abstract": "Abstract: Most illustration systems need a lot of user strokes to generate natural-looking pen-and-ink illustrations. In order to reduce the number of user strokes necessary, we propose a new method for pen-and-ink illustrations using a stroke morphing concept. For this, we introduce a general stroke morphing procedure, which consists of both flow-oriented morphing and shape-oriented morphing. Using this morphing technique, we can make more natural-looking pen-and-ink illustration with fewer user strokes. This work can be applied to generate simplified pictures for dictionary typesetting. The main purpose of this paper is to describe this method, which requires fewer user strokes than other previous methods. Experimental results are given in the final section.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract: Most illustration systems need a lot of user strokes to generate natural-looking pen-and-ink illustrations. In order to reduce the number of user strokes necessary, we propose a new method for pen-and-ink illustrations using a stroke morphing concept. For this, we introduce a general stroke morphing procedure, which consists of both flow-oriented morphing and shape-oriented morphing. Using this morphing technique, we can make more natural-looking pen-and-ink illustration with fewer user strokes. This work can be applied to generate simplified pictures for dictionary typesetting. 
The main purpose of this paper is to describe this method, which requires fewer user strokes than other previous methods. Experimental results are given in the final section.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract: Most illustration systems need a lot of user strokes to generate natural-looking pen-and-ink illustrations. In order to reduce the number of user strokes necessary, we propose a new method for pen-and-ink illustrations using a stroke morphing concept. For this, we introduce a general stroke morphing procedure, which consists of both flow-oriented morphing and shape-oriented morphing. Using this morphing technique, we can make more natural-looking pen-and-ink illustration with fewer user strokes. This work can be applied to generate simplified pictures for dictionary typesetting. The main purpose of this paper is to describe this method, which requires fewer user strokes than other previous methods. Experimental results are given in the final section.", "fno": "10070113", "keywords": [ "Illustration", "Stroke Morphing" ], "authors": [ { "affiliation": "ETRI", "fullName": "Hye-Sun Kim", "givenName": "Hye-Sun", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Pusan National University", "fullName": "Hee-Jeong Jin", "givenName": "Hee-Jeong", "surname": "Jin", "__typename": "ArticleAuthorType" }, { "affiliation": "Pusan National University", "fullName": "Young-Jung Yu", "givenName": "Young-Jung", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Pusan National University", "fullName": "Hwan-Gue Cho", "givenName": "Hwan-Gue", "surname": "Cho", "__typename": "ArticleAuthorType" } ], "idPrefix": "cgi", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-07-01T00:00:00", "pubType": "proceedings", "pages": "0113", "year": "2001", "issn": null, "isbn": "0-7695-1007-8", "notes": null, "notesType": null, "__typename": 
"ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "10070107", "articleId": "12OmNzIUfJG", "__typename": "AdjacentArticleType" }, "next": { "fno": "10070123", "articleId": "12OmNykCcfM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNC3XhgT", "title": "Computational Intelligence and Multimedia Applications, International Conference on", "acronym": "iccima", "groupId": "1000114", "volume": "0", "displayVolume": "0", "year": "2003", "__typename": "ProceedingType" }, "article": { "id": "12OmNzIl3A8", "doi": "10.1109/ICCIMA.2003.1238144", "title": "A Novel Approach in Off-Line Handwritten Chinese Character Stroke Segmentation", "normalizedTitle": "A Novel Approach in Off-Line Handwritten Chinese Character Stroke Segmentation", "abstract": "In recognition of hand-written characters, stroke segmentation often serves as a crucial step. In this paper, we introduce a new method called Manifold Extraction to solve this problem. The basic idea of Manifold Extraction is: first build a neighborhood graph to capture the intrinsic topological structure of the sampled characters, then analyze the dimensional uniformity of neighboring points to discover the segments of strokes, finally combine the segments that are possibly from the same stroke and get the more informative structures of the characters. In this way, Manifold Extraction identifies the interlacing strokes in a complicated background and accomplishes the step of stroke segmentation. The experimental results show the effectiveness of this method in stroke segmentation as well as in exploratory data analysis.", "abstracts": [ { "abstractType": "Regular", "content": "In recognition of hand-written characters, stroke segmentation often serves as a crucial step. In this paper, we introduce a new method called Manifold Extraction to solve this problem. The basic idea of Manifold Extraction is: first build a neighborhood graph to capture the intrinsic topological structure of the sampled characters, then analyze the dimensional uniformity of neighboring points to discover the segments of strokes, finally combine the segments that are possibly from the same stroke and get the more informative structures of the characters. 
In this way, Manifold Extraction identifies the interlacing strokes in a complicated background and accomplishes the step of stroke segmentation. The experimental results show the effectiveness of this method in stroke segmentation as well as in exploratory data analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recognition of hand-written characters, stroke segmentation often serves as a crucial step. In this paper, we introduce a new method called Manifold Extraction to solve this problem. The basic idea of Manifold Extraction is: first build a neighborhood graph to capture the intrinsic topological structure of the sampled characters, then analyze the dimensional uniformity of neighboring points to discover the segments of strokes, finally combine the segments that are possibly from the same stroke and get the more informative structures of the characters. In this way, Manifold Extraction identifies the interlacing strokes in a complicated background and accomplishes the step of stroke segmentation. 
The experimental results show the effectiveness of this method in stroke segmentation as well as in exploratory data analysis.", "fno": "19570314", "keywords": [ "Stroke Segmentation", "Local PCA", "Isomap", "Intrinsic Dimensionality" ], "authors": [ { "affiliation": "Tsinghua University", "fullName": "Tao Ban", "givenName": "Tao", "surname": "Ban", "__typename": "ArticleAuthorType" }, { "affiliation": "Tsinghua University", "fullName": "Chang-shui Zhang", "givenName": "Chang-shui", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tsinghua University", "fullName": "Wei Shu", "givenName": "Wei", "surname": "Shu", "__typename": "ArticleAuthorType" }, { "affiliation": "Tsinghua University", "fullName": "Zhong-bao Kou", "givenName": "Zhong-bao", "surname": "Kou", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccima", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2003-09-01T00:00:00", "pubType": "proceedings", "pages": "314", "year": "2003", "issn": null, "isbn": "0-7695-1957-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "19570308", "articleId": "12OmNzUgcY2", "__typename": "AdjacentArticleType" }, "next": { "fno": "19570319", "articleId": "12OmNCwlafm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2002/1695/3/169530249", "title": "Off-Line Handwritten Chinese Character Stroke Extraction", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169530249/12OmNBW0vF6", "parentPublication": { "id": "proceedings/icpr/2002/1695/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2012/2262/0/06424493", "title": "A Novel Approach for Stroke Extraction of Off-Line Chinese Handwritten Characters Based on 
Optimum Paths", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2012/06424493/12OmNBpVQ2h", "parentPublication": { "id": "proceedings/icfhr/2012/2262/0", "title": "2012 International Conference on Frontiers in Handwriting Recognition (ICFHR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/4/07504368", "title": "A Model of Stroke Extraction from Chinese Character Images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07504368/12OmNqFJhOb", "parentPublication": { "id": "proceedings/icpr/2000/0750/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/1/252110868", "title": "Stroke Segmentation of Chinese Characters Using Markov Random Fields", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252110868/12OmNviZlNr", "parentPublication": { "id": "proceedings/icpr/2006/2521/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a531", "title": "An Investigation of Imaginary Stroke Techinique for Cursive Online Handwriting Chinese Character Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a531/12OmNxWui8G", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/1999/0318/0/03180665", "title": "Stroke-Guided Pixel Matching for Handwritten Chinese Character Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/1999/03180665/12OmNyrqzsv", "parentPublication": { "id": "proceedings/icdar/1999/0318/0", "title": "Proceedings of the Fifth International Conference on Document Analysis 
and Recognition. ICDAR '99 (Cat. No.PR00318)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2012/2262/0/06424485", "title": "Stroke Segmentation and Recognition from Bangla Online Handwritten Text", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2012/06424485/12OmNzG4gy6", "parentPublication": { "id": "proceedings/icfhr/2012/2262/0", "title": "2012 International Conference on Frontiers in Handwriting Recognition (ICFHR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1993/02/i0180", "title": "Attributed String Matching by Split-and-Merge for On-Line Chinese Character Recognition", "doi": null, "abstractUrl": "/journal/tp/1993/02/i0180/13rRUwbs2hg", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2018/9159/0/08594985", "title": "DeepAD: A Deep Learning Based Approach to Stroke-Level Abnormality Detection in Handwritten Chinese Character Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdm/2018/08594985/17D45VtKitQ", "parentPublication": { "id": "proceedings/icdm/2018/9159/0", "title": "2018 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2021/07/09036986", "title": "<sc>SmartSO</sc>: Chinese Character and Stroke Order Recognition With Smartwatch", "doi": null, "abstractUrl": "/journal/tm/2021/07/09036986/1igMQsEQrOo", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1fHkkWQ0aEE", "title": "2019 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1fHkpp4xIJi", "doi": "10.1109/CW.2019.00020", "title": "Query by Partially-Drawn Sketches for 3D Shape Retrieval", "normalizedTitle": "Query by Partially-Drawn Sketches for 3D Shape Retrieval", "abstract": "Hand-drawn sketch is a powerful modality to query 3D shape models. However, specifying a detailed 3D shape by a sketch on the first try without reference (i.e., 3D model or real object) is difficult. In this paper, we aim at a sketch-based 3D shape retrieval system that tolerates coarsely drawn or incomplete sketches having small number of strokes. Such a system could be used to start a sketch-retrieve-refine interactive loop that could lead to a 3D shape having required shape details. Proposed algorithm uses deep feature embedding into common feature embedding space to compare sketches and 3D shape models. To handle coarse or incomplete sketches, a sketch, which is a sequence of strokes, is augmented by removing stroke for training a pair of DNNs to extract sketch features. A sketch feature is a fusion of an image based feature extracted by a convolutional neural network (CNN) and a 2D point sequence feature extracted by using a recurrent neural network (RNN). Embedding of 3D shape feature and the sketch feature is learned by using triplet loss. Experimental evaluation of the proposed method is performed using (simulated) incomplete sketches created by removing part of their strokes. The experiments show that sketch stroke removal augmentation significantly improved retrieval accuracy if queried by using such incomplete sketches.", "abstracts": [ { "abstractType": "Regular", "content": "Hand-drawn sketch is a powerful modality to query 3D shape models. 
However, specifying a detailed 3D shape by a sketch on the first try without reference (i.e., 3D model or real object) is difficult. In this paper, we aim at a sketch-based 3D shape retrieval system that tolerates coarsely drawn or incomplete sketches having small number of strokes. Such a system could be used to start a sketch-retrieve-refine interactive loop that could lead to a 3D shape having required shape details. Proposed algorithm uses deep feature embedding into common feature embedding space to compare sketches and 3D shape models. To handle coarse or incomplete sketches, a sketch, which is a sequence of strokes, is augmented by removing stroke for training a pair of DNNs to extract sketch features. A sketch feature is a fusion of an image based feature extracted by a convolutional neural network (CNN) and a 2D point sequence feature extracted by using a recurrent neural network (RNN). Embedding of 3D shape feature and the sketch feature is learned by using triplet loss. Experimental evaluation of the proposed method is performed using (simulated) incomplete sketches created by removing part of their strokes. The experiments show that sketch stroke removal augmentation significantly improved retrieval accuracy if queried by using such incomplete sketches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Hand-drawn sketch is a powerful modality to query 3D shape models. However, specifying a detailed 3D shape by a sketch on the first try without reference (i.e., 3D model or real object) is difficult. In this paper, we aim at a sketch-based 3D shape retrieval system that tolerates coarsely drawn or incomplete sketches having small number of strokes. Such a system could be used to start a sketch-retrieve-refine interactive loop that could lead to a 3D shape having required shape details. Proposed algorithm uses deep feature embedding into common feature embedding space to compare sketches and 3D shape models. 
To handle coarse or incomplete sketches, a sketch, which is a sequence of strokes, is augmented by removing stroke for training a pair of DNNs to extract sketch features. A sketch feature is a fusion of an image based feature extracted by a convolutional neural network (CNN) and a 2D point sequence feature extracted by using a recurrent neural network (RNN). Embedding of 3D shape feature and the sketch feature is learned by using triplet loss. Experimental evaluation of the proposed method is performed using (simulated) incomplete sketches created by removing part of their strokes. The experiments show that sketch stroke removal augmentation significantly improved retrieval accuracy if queried by using such incomplete sketches.", "fno": "229700a069", "keywords": [ "Convolutional Neural Nets", "Feature Extraction", "Image Classification", "Image Representation", "Image Retrieval", "Learning Artificial Intelligence", "Recurrent Neural Nets", "Solid Modelling", "DN Ns", "Triplet Loss", "Recurrent Neural Network", "Convolutional Neural Network", "Sketch Stroke Removal Augmentation", "3 D Shape Feature", "2 D Point Sequence Feature", "Sketch Feature", "Deep Feature Embedding", "Shape Details", "Sketch Retrieve Refine Interactive Loop", "Sketch Based 3 D Shape Retrieval System", "Query 3 D Shape Models", "Hand Drawn Sketch", "Feature Extraction", "Three Dimensional Displays", "Shape", "Training", "Measurement", "Recurrent Neural Networks", "3 D Shape Retrieval", "Sketch Based Retrieval", "Deep Metric Learning", "Triplet Network", "Recurrent Neural Network", "2 D Convolutional Neural Network" ], "authors": [ { "affiliation": "University of Yamanashi", "fullName": "Shutaro Kuwabara", "givenName": "Shutaro", "surname": "Kuwabara", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Yamanashi", "fullName": "Ryutarou Ohbuchi", "givenName": "Ryutarou", "surname": "Ohbuchi", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Yamanashi", 
"fullName": "Takahiko Furuya", "givenName": "Takahiko", "surname": "Furuya", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "69-76", "year": "2019", "issn": null, "isbn": "978-1-7281-2297-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "229700a061", "articleId": "1fHkoP8izEQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "229700a077", "articleId": "1fHkl2Uoh8c", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457d615", "title": "Learning Barycentric Representations of 3D Shapes for Sketch-Based 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d615/12OmNB0FxiX", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a067", "title": "3D Shape Reconstruction from Sketches via Multi-view Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a067/12OmNCu4nbZ", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2016/4400/0/4400a261", "title": "3D Model Retrieval Based on Hand Drawn Sketches Using LDA Model", "doi": null, "abstractUrl": "/proceedings-article/icdh/2016/4400a261/12OmNqyUUvj", "parentPublication": { "id": "proceedings/icdh/2016/4400/0", "title": "2016 6th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "mags/cg/2017/06/mcg2017060088", "title": "Sketch-Based Articulated 3D Shape Retrieval", "doi": null, "abstractUrl": "/magazine/cg/2017/06/mcg2017060088/13rRUwfqpG7", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2018/9497/0/949700a311", "title": "Sketch-Based Shape Retrieval via Multi-view Attention and Generalized Similarity", "doi": null, "abstractUrl": "/proceedings-article/icdh/2018/949700a311/17D45VObpQZ", "parentPublication": { "id": "proceedings/icdh/2018/9497/0", "title": "2018 7th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200n3003", "title": "Sketch2Mesh: Reconstructing and Editing 3D Shapes from Sketches", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200n3003/1BmJsHikEfu", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102925", "title": "Cross-Modal Guidance Network For Sketch-Based 3d Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102925/1kwqTrDSXF6", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h434", "title": "Learning to Shadow Hand-Drawn Sketches", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h434/1m3o1V04tiM", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a081", "title": "Towards 3D VR-Sketch to 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a081/1qyxlDtR0Ji", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a184", "title": "Deep 3D Shape Reconstruction from Single-View Sketch Image", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a184/1uGY2GTiIda", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCb3frA", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "acronym": "cluster", "groupId": "1000095", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNA14Aeu", "doi": "10.1109/CLUSTER.2015.148", "title": "HUCAA 2015 Workshop Welcome Message", "normalizedTitle": "HUCAA 2015 Workshop Welcome Message", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "6598z029", "keywords": [], "authors": [], "idPrefix": "cluster", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "xxix-xxix", "year": "2015", "issn": null, "isbn": "978-1-4673-6598-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6598z027", "articleId": "12OmNrH1PAH", "__typename": "AdjacentArticleType" }, "next": { "fno": "6598z030", "articleId": "12OmNAmE5Yt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cluster/2015/6598/0/6598z031", "title": "HPCMASPA 2015 Workshop Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z031/12OmNAWpyr6", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom-bigdatase-i-spa/2015/7952/3/07345618", "title": "Message from the PBio 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2015/07345618/12OmNvSKNCd", "parentPublication": { "id": "trustcom-bigdatase-i-spa/2015/7952/3", "title": "2015 IEEE Trustcom/BigDataSE/I​SPA", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2015/7367/0/7367z008", "title": "Message from FiSTA 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2015/7367z008/12OmNxecRTR", "parentPublication": { "id": "proceedings/iccsa/2015/7367/0", "title": "2015 15th International Conference on Computational Science and Its Applications (ICCSA)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362994", "title": "Message from the IWSCA 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362994/12OmNy3RRCl", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2015/8828/0/8828z015", "title": "Welcome Message from the ITNG 2015 General Chair", "doi": null, "abstractUrl": "/proceedings-article/itng/2015/8828z015/12OmNyqRnk8", "parentPublication": { "id": "proceedings/itng/2015/8828/0", "title": "2015 12th International Conference on Information Technology - New Generations (ITNG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362996", "title": "Message from the ICTA 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362996/12OmNyvGymI", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; 
Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2015/7367/0/7367z009", "title": "Message from TTSDP 2015 Workshop Chair", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2015/7367z009/12OmNyyeWzl", "parentPublication": { "id": "proceedings/iccsa/2015/7367/0", "title": "2015 15th International Conference on Computational Science and Its Applications (ICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z033", "title": "HiPINEB 2015 Workshop Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z033/12OmNz2TCvL", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcoss/2021/3929/0/392900z017", "title": "Welcome Message from the Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/dcoss/2021/392900z017/1yBFhNo0B1e", "parentPublication": { "id": "proceedings/dcoss/2021/3929/0", "title": "2021 17th International Conference on Distributed Computing in Sensor Systems (DCOSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrAdsuf", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNrnJ6KU", "doi": "10.1109/ISMAR.2015.4", "title": "Message from the ISMAR 2015 General Chairs", "normalizedTitle": "Message from the ISMAR 2015 General Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "7660z011", "keywords": [], "authors": [], "idPrefix": "ismar", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "xi-xi", "year": "2015", "issn": null, "isbn": "978-1-4673-7660-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7660ztoc", "articleId": "12OmNCdBDTR", "__typename": "AdjacentArticleType" }, "next": { "fno": "7660z012", "articleId": "12OmNySosNz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/euc/2015/8299/0/8299z009", "title": "Message from the EUC 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/euc/2015/8299z009/12OmNCvumQw", "parentPublication": { "id": "proceedings/euc/2015/8299/0", "title": "2015 IEEE 13th International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wbma/2015/1377/0/07932669", "title": "Message from General Chairs WBMA 2015", "doi": null, "abstractUrl": "/proceedings-article/wbma/2015/07932669/12OmNwKoZcM", "parentPublication": { "id": "proceedings/wbma/2015/1377/0", "title": "2015 6th Brazilian Workshop on Agile Methods (WBMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836426", "title": "Message from the ISMAR 2016 General Chair and Deputy General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836426/12OmNwNwzCN", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362972", "title": "Message from the IUCC 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362972/12OmNwekjF3", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362976", "title": "Message from the DASC 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362976/12OmNzTH0Ol", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362968", "title": "Message from the CIT 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362968/12OmNzwpU6k", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE 
International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900z010", "title": "Message from the ISMAR 2018 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900z010/17D45VsBU1W", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943702", "title": "Message from the ISMAR 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943702/1hcx1d8JHgY", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z009", "title": "Message from the ISMAR 2021 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z009/1yeD0R3eZa0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrAdsuf", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNySosNz", "doi": "10.1109/ISMAR.2015.5", "title": "Welcome Message from the ISMAR 2015 Science and Technology Program Chairs", "normalizedTitle": "Welcome Message from the ISMAR 2015 Science and Technology Program Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "7660z012", "keywords": [], "authors": [], "idPrefix": "ismar", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "xii-xiv", "year": "2015", "issn": null, "isbn": "978-1-4673-7660-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7660z011", "articleId": "12OmNrnJ6KU", "__typename": "AdjacentArticleType" }, "next": { "fno": "7660z015", "articleId": "12OmNBr4eom", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/prdc/2015/9376/0/9376z011", "title": "Welcome Message from the PRDC 2015 Program Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/prdc/2015/9376z011/12OmNrJAefK", "parentPublication": { "id": "proceedings/prdc/2015/9376/0", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing (PRDC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660z011", "title": "Message from the ISMAR 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660z011/12OmNrnJ6KU", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2017/2943/0/2943z016", "title": "Message from the ISMAR 2017 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943z016/12OmNscxj4F", "parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality 
(ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prdc/2015/9376/0/9376z010", "title": "Welcome Message from the PRDC 2015 General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/prdc/2015/9376z010/12OmNwwuDRT", "parentPublication": { "id": "proceedings/prdc/2015/9376/0", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing (PRDC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836427", "title": "Message from the ISMAR 2016 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836427/12OmNxveNMo", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2017/2943/0/2943z013", "title": "Message from the ISMAR 2017 Science and Technology Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943z013/12OmNz6iOks", "parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900z015", "title": "Message from the ISMAR 2018 Science and Technology Poster Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2018/745900z015/17D45Xq6dC5", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943696", "title": "Message from the ISMAR 2019 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943696/1hcx3Kkxnc4", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943623", "title": "Message from the ISMAR 2018 Science and Technology Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943623/1hcx4e3AQKY", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCb3frA", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "acronym": "cluster", "groupId": "1000095", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNz2TCvL", "doi": "10.1109/CLUSTER.2015.155", "title": "HiPINEB 2015 Workshop Welcome Message", "normalizedTitle": "HiPINEB 2015 Workshop Welcome Message", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "6598z033", "keywords": [], "authors": [], "idPrefix": "cluster", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "xxxiii-xxxiii", "year": "2015", "issn": null, "isbn": "978-1-4673-6598-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6598z032", "articleId": "12OmNvAiSa5", "__typename": "AdjacentArticleType" }, "next": { "fno": "6598z034", "articleId": "12OmNwE9OIR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cluster/2015/6598/0/6598z029", "title": "HUCAA 2015 Workshop Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z029/12OmNA14Aeu", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z031", "title": "HPCMASPA 2015 Workshop Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z031/12OmNAWpyr6", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom-bigdatase-i-spa/2015/7952/3/07345618", "title": "Message from the PBio 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2015/07345618/12OmNvSKNCd", "parentPublication": { "id": "trustcom-bigdatase-i-spa/2015/7952/3", "title": "2015 IEEE Trustcom/BigDataSE/I​SPA", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2015/7367/0/7367z008", "title": "Message from FiSTA 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2015/7367z008/12OmNxecRTR", "parentPublication": { "id": "proceedings/iccsa/2015/7367/0", "title": "2015 15th International Conference on Computational Science and Its Applications (ICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362994", "title": "Message from the IWSCA 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362994/12OmNy3RRCl", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2015/8828/0/8828z015", "title": "Welcome Message from the ITNG 2015 General Chair", "doi": null, "abstractUrl": "/proceedings-article/itng/2015/8828z015/12OmNyqRnk8", "parentPublication": { "id": "proceedings/itng/2015/8828/0", "title": "2015 12th International Conference on Information Technology - New Generations (ITNG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cit-iucc-dasc-picom/2015/0154/0/07362996", "title": "Message from the ICTA 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362996/12OmNyvGymI", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2015/7367/0/7367z009", "title": "Message from TTSDP 2015 Workshop Chair", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2015/7367z009/12OmNyyeWzl", "parentPublication": { "id": "proceedings/iccsa/2015/7367/0", "title": "2015 15th International Conference on Computational Science and Its Applications (ICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcoss/2021/3929/0/392900z017", "title": "Welcome Message from the Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/dcoss/2021/392900z017/1yBFhNo0B1e", "parentPublication": { "id": "proceedings/dcoss/2021/3929/0", "title": "2021 17th International Conference on Distributed Computing in Sensor Systems (DCOSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKisy", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45VsBU1W", "doi": "10.1109/ISMAR.2018.00005", "title": "Message from the ISMAR 2018 General Chairs", "normalizedTitle": "Message from the ISMAR 2018 General Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "745900z010", "keywords": [], "authors": [], "idPrefix": "ismar", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "10-10", "year": "2018", "issn": "1554-7868", "isbn": "978-1-5386-7459-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "745900z005", "articleId": "17D45XwUALO", "__typename": "AdjacentArticleType" }, "next": { "fno": "745900z011", "articleId": "17D45XvMcen", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2017/2943/0/2943z009", "title": "Message from the ISMAR 2017 General Chair and Deputy General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943z009/12OmNAsTgTJ", "parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660z011", "title": "Message from the ISMAR 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660z011/12OmNrnJ6KU", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836426", "title": "Message from the ISMAR 2016 General Chair and Deputy General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836426/12OmNwNwzCN", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on 
Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2018/6352/0/635201z013", "title": "Message from the Mutation 2018 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icstw/2018/635201z013/12OmNxI0KAj", "parentPublication": { "id": "proceedings/icstw/2018/6352/0", "title": "2018 IEEE International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issre/2018/8321/0/832100z009", "title": "Message from the ISSRE 2018 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/issre/2018/832100z009/17D45XeKguM", "parentPublication": { "id": "proceedings/issre/2018/8321/0", "title": "2018 IEEE 29th International Symposium on Software Reliability Engineering (ISSRE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900z015", "title": "Message from the ISMAR 2018 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900z015/17D45Xq6dC5", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2018/0604/0/060400z010", "title": "Message from the SVR 2018 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/svr/2018/060400z010/1cJ7yjeeHy8", "parentPublication": { "id": "proceedings/svr/2018/0604/0", "title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943702", "title": "Message from the ISMAR 2019 General Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2019/08943702/1hcx1d8JHgY", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943623", "title": "Message from the ISMAR 2018 Science and Technology Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943623/1hcx4e3AQKY", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z009", "title": "Message from the ISMAR 2021 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z009/1yeD0R3eZa0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1grOKVFffCo", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hcx1d8JHgY", "doi": "10.1109/ISMAR.2019.00005", "title": "Message from the ISMAR 2019 General Chairs", "normalizedTitle": "Message from the ISMAR 2019 General Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "08943702", "keywords": [], "authors": [], "idPrefix": "ismar", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "13-13", "year": "2019", "issn": "1554-7868", "isbn": "978-1-7281-0987-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08943780", "articleId": "1hcx1hlSEWk", "__typename": "AdjacentArticleType" }, "next": { "fno": "08943622", "articleId": "1hcx2JPFazm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2015/7660/0/7660z011", "title": "Message from the ISMAR 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660z011/12OmNrnJ6KU", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836426", "title": "Message from the ISMAR 2016 General Chair and Deputy General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836426/12OmNwNwzCN", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900z010", "title": "Message from the ISMAR 2018 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900z010/17D45VsBU1W", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented 
Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2019/2607/1/260701z027", "title": "Message from the 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/compsac/2019/260701z027/1cYiAwn59PW", "parentPublication": { "id": "proceedings/compsac/2019/2607/1", "title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2019/2607/2/260702z024", "title": "Message from the 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/compsac/2019/260702z024/1cYipfFK5a0", "parentPublication": { "id": "compsac/2019/2607/2", "title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000z033", "title": "Message from the CPSCom 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000z033/1ehBzFD3vRS", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0", "title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/blockchain/2019/4693/0/469300z018", "title": "Message from the Blockchain 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/blockchain/2019/469300z018/1gjS8XTQem4", "parentPublication": { "id": "proceedings/blockchain/2019/4693/0", "title": "2019 IEEE International Conference on Blockchain (Blockchain)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/ismar/2019/0987/0/08943696", "title": "Message from the ISMAR 2019 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943696/1hcx3Kkxnc4", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943715", "title": "Message from the ISMAR 2019 Exhibition &amp; Demos Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943715/1hcx3pEERMI", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z009", "title": "Message from the ISMAR 2021 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z009/1yeD0R3eZa0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeCSUXkdhu", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeD0R3eZa0", "doi": "10.1109/ISMAR52148.2021.00005", "title": "Message from the ISMAR 2021 General Chairs", "normalizedTitle": "Message from the ISMAR 2021 General Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "015800z009", "keywords": [], "authors": [], "idPrefix": "ismar", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "9-10", "year": "2021", "issn": "1554-7868", "isbn": "978-1-6654-0158-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "015800z005", "articleId": "1yeD0XJ68O4", "__typename": "AdjacentArticleType" }, "next": { "fno": "015800z011", "articleId": "1yeCTeuOcqQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2015/7660/0/7660z011", "title": "Message from the ISMAR 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660z011/12OmNrnJ6KU", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900z010", "title": "Message from the ISMAR 2018 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900z010/17D45VsBU1W", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/issrew/2021/2603/0/260300z015", "title": "Message from the ISSRE 2021 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/issrew/2021/260300z015/1AZOhYdE8LK", "parentPublication": { "id": "proceedings/issrew/2021/2603/0", "title": "2021 IEEE International Symposium on Software Reliability Engineering Workshops 
(ISSREW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wetice/2021/2789/0/278900z011", "title": "Message from the WETICE 2021 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/wetice/2021/278900z011/1AqwK6gOmuQ", "parentPublication": { "id": "proceedings/wetice/2021/2789/0", "title": "2021 IEEE 30th International Conference on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2021/2398/0/239800z028", "title": "Message from the ICDM 2021 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdm/2021/239800z028/1Aqx0djKfle", "parentPublication": { "id": "proceedings/icdm/2021/2398/0", "title": "2021 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943702", "title": "Message from the ISMAR 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943702/1hcx1d8JHgY", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edcc/2021/3671/0/367100z008", "title": "Message from the General Chairs EDCC 2021", "doi": null, "abstractUrl": "/proceedings-article/edcc/2021/367100z008/1yDkga9ZE88", "parentPublication": { "id": "proceedings/edcc/2021/3671/0", "title": "2021 17th European Dependable Computing Conference (EDCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800z013", "title": "Message from the ISMAR 2021 Workshop and Tutorial Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800z013/1yeQDuQoaT6", 
"parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800z012", "title": "Message from the ISMAR 2021 Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800z012/1yeQGtBgEYE", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800z014", "title": "Message from the ISMAR 2021 Demos Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800z014/1yfxJ0ndxrG", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeQGtBgEYE", "doi": "10.1109/ISMAR-Adjunct54149.2021.00005", "title": "Message from the ISMAR 2021 Poster Chairs", "normalizedTitle": "Message from the ISMAR 2021 Poster Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "129800z012", "keywords": [], "authors": [], "idPrefix": "ismar-adjunct", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "12-12", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800z005", "articleId": "1yeQGXDCRnW", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800z013", "articleId": "1yeQDuQoaT6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2017/2943/0/2943z016", "title": "Message from the ISMAR 2017 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943z016/12OmNscxj4F", "parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836427", "title": "Message from the ISMAR 2016 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836427/12OmNxveNMo", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900z015", "title": "Message from the ISMAR 2018 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900z015/17D45Xq6dC5", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE 
International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943702", "title": "Message from the ISMAR 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943702/1hcx1d8JHgY", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943696", "title": "Message from the ISMAR 2019 Science and Technology Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943696/1hcx3Kkxnc4", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2021/1219/0/121900z026", "title": "Message from the Poster Track Chairs of ICSE 2021", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2021/121900z026/1sET5oJI676", "parentPublication": { "id": "proceedings/icse-companion/2021/1219/0/", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z011", "title": "Message from the ISMAR 2021 Science and Technology Conference Paper Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z011/1yeCTeuOcqQ", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z009", 
"title": "Message from the ISMAR 2021 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z009/1yeD0R3eZa0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800z013", "title": "Message from the ISMAR 2021 Workshop and Tutorial Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800z013/1yeQDuQoaT6", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800z014", "title": "Message from the ISMAR 2021 Demos Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800z014/1yfxJ0ndxrG", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yfxJ0ndxrG", "doi": "10.1109/ISMAR-Adjunct54149.2021.00007", "title": "Message from the ISMAR 2021 Demos Chairs", "normalizedTitle": "Message from the ISMAR 2021 Demos Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "129800z014", "keywords": [], "authors": [], "idPrefix": "ismar-adjunct", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "14-14", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800z013", "articleId": "1yeQDuQoaT6", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800z015", "articleId": "1yeQzNK70UE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/mobilesoft/2017/2669/0/07972702", "title": "Message from Tool Demos and Mobile Apps Track Chairs", "doi": null, "abstractUrl": "/proceedings-article/mobilesoft/2017/07972702/12OmNAJ4phb", "parentPublication": { "id": "proceedings/mobilesoft/2017/2669/0", "title": "2017 IEEE/ACM 4th International Conference on Mobile Software Engineering and Systems (MOBILESoft)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660z011", "title": "Message from the ISMAR 2015 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660z011/12OmNrnJ6KU", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900z010", "title": "Message from the ISMAR 2018 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900z010/17D45VsBU1W", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium 
on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943702", "title": "Message from the ISMAR 2019 General Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943702/1hcx1d8JHgY", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2019/0987/0/08943715", "title": "Message from the ISMAR 2019 Exhibition &amp; Demos Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2019/08943715/1hcx3pEERMI", "parentPublication": { "id": "proceedings/ismar/2019/0987/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seams/2021/0289/0/028900z010", "title": "Message from the SEAMS 2021 Chairs SEAMS 2021", "doi": null, "abstractUrl": "/proceedings-article/seams/2021/028900z010/1tB9baiynPW", "parentPublication": { "id": "proceedings/seams/2021/0289/0/", "title": "2021 International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z011", "title": "Message from the ISMAR 2021 Science and Technology Conference Paper Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z011/1yeCTeuOcqQ", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z009", "title": "Message from the ISMAR 2021 General Chairs", "doi": null, 
"abstractUrl": "/proceedings-article/ismar/2021/015800z009/1yeD0R3eZa0", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800z013", "title": "Message from the ISMAR 2021 Workshop and Tutorial Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800z013/1yeQDuQoaT6", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800z012", "title": "Message from the ISMAR 2021 Poster Chairs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800z012/1yeQGtBgEYE", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCb3frA", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "acronym": "cluster", "groupId": "1000095", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNAWpyr6", "doi": "10.1109/CLUSTER.2015.150", "title": "HPCMASPA 2015 Workshop Welcome Message", "normalizedTitle": "HPCMASPA 2015 Workshop Welcome Message", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "6598z031", "keywords": [], "authors": [], "idPrefix": "cluster", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "xxxi-xxxi", "year": "2015", "issn": null, "isbn": "978-1-4673-6598-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6598z030", "articleId": "12OmNAmE5Yt", "__typename": "AdjacentArticleType" }, "next": { "fno": "6598z032", "articleId": "12OmNvAiSa5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cluster/2015/6598/0/6598z029", "title": "HUCAA 2015 Workshop Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z029/12OmNA14Aeu", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362999", "title": "Message from the BigData 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362999/12OmNAFWOPA", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362991", "title": "Message from the WISH 2015 Workshop Chairs", "doi": null, "abstractUrl": 
"/proceedings-article/cit-iucc-dasc-picom/2015/07362991/12OmNxA3Z7M", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07362997", "title": "Message from the MT4H 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07362997/12OmNxX3uFy", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2015/7367/0/7367z008", "title": "Message from FiSTA 2015 Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2015/7367z008/12OmNxecRTR", "parentPublication": { "id": "proceedings/iccsa/2015/7367/0", "title": "2015 15th International Conference on Computational Science and Its Applications (ICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2015/8828/0/8828z015", "title": "Welcome Message from the ITNG 2015 
General Chair", "doi": null, "abstractUrl": "/proceedings-article/itng/2015/8828z015/12OmNyqRnk8", "parentPublication": { "id": "proceedings/itng/2015/8828/0", "title": "2015 12th International Conference on Information Technology - New Generations (ITNG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsa/2015/7367/0/7367z009", "title": "Message from TTSDP 2015 Workshop Chair", "doi": null, "abstractUrl": "/proceedings-article/iccsa/2015/7367z009/12OmNyyeWzl", "parentPublication": { "id": "proceedings/iccsa/2015/7367/0", "title": "2015 15th International Conference on Computational Science and Its Applications (ICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z033", "title": "HiPINEB 2015 Workshop Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z033/12OmNz2TCvL", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcoss/2021/3929/0/392900z017", "title": "Welcome Message from the Workshop Chairs", "doi": null, "abstractUrl": "/proceedings-article/dcoss/2021/392900z017/1yBFhNo0B1e", "parentPublication": { "id": "proceedings/dcoss/2021/3929/0", "title": "2021 17th International Conference on Distributed Computing in Sensor Systems (DCOSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzBOhXL", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing (PRDC)", "acronym": "prdc", "groupId": "1000191", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNrJAefK", "doi": "10.1109/PRDC.2015.5", "title": "Welcome Message from the PRDC 2015 Program Co-Chairs", "normalizedTitle": "Welcome Message from the PRDC 2015 Program Co-Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "9376z011", "keywords": [], "authors": [], "idPrefix": "prdc", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-11-01T00:00:00", "pubType": "proceedings", "pages": "xi-xi", "year": "2015", "issn": null, "isbn": "978-1-4673-9376-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9376z010", "articleId": "12OmNwwuDRT", "__typename": "AdjacentArticleType" }, "next": { "fno": "9376z012", "articleId": "12OmNyNzhzS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ichi/2017/4881/0/4881z013", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2017/4881z013/12OmNrIrPub", "parentPublication": { "id": "proceedings/ichi/2017/4881/0", "title": "2017 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcsw/2014/4181/0/06888823", "title": "Welcome Message from the Workshop Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icdcsw/2014/06888823/12OmNs59JQQ", "parentPublication": { "id": "proceedings/icdcsw/2014/4181/0", "title": "2014 IEEE 34th International Conference on Distributed Computing Systems Workshops (ICDCSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icac/2015/6971/0/6971z013", "title": "Welcome Message from the Program Committee Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icac/2015/6971z013/12OmNvjyxTN", "parentPublication": { "id": "proceedings/icac/2015/6971/0", "title": "2015 IEEE International Conference on Autonomic Computing (ICAC)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prdc/2015/9376/0/9376z010", "title": "Welcome Message from the PRDC 2015 General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/prdc/2015/9376z010/12OmNwwuDRT", "parentPublication": { "id": "proceedings/prdc/2015/9376/0", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing (PRDC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2014/5701/0/5701z012", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2014/5701z012/12OmNy50giy", "parentPublication": { "id": "proceedings/ichi/2014/5701/0", "title": "2014 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2015/9548/0/9548z015", "title": "Welcome from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2015/9548z015/12OmNyL0ThT", "parentPublication": { "id": "proceedings/ichi/2015/9548/0", "title": "2015 International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbd/2015/8537/0/8537z010", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/cbd/2015/8537z010/12OmNzn38JY", "parentPublication": { "id": "proceedings/cbd/2015/8537/0", "title": "2015 Third International 
Conference on Advanced Cloud and Big Data (CBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgo/2022/0584/0/09741271", "title": "Welcome from the Program Chairs", "doi": null, "abstractUrl": "/proceedings-article/cgo/2022/09741271/1C8FNZjWugE", "parentPublication": { "id": "proceedings/cgo/2022/0584/0", "title": "2022 IEEE/ACM International Symposium on Code Generation and Optimization (CGO)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rethics/2020/8350/0/835000z006", "title": "Welcome from the Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/rethics/2020/835000z006/1nYsPozr5QY", "parentPublication": { "id": "proceedings/rethics/2020/8350/0", "title": "2020 1st Workshop on Ethics in Requirements Engineering Research and Practice (REthics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzBOhXL", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing (PRDC)", "acronym": "prdc", "groupId": "1000191", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNwwuDRT", "doi": "10.1109/PRDC.2015.4", "title": "Welcome Message from the PRDC 2015 General Co-Chairs", "normalizedTitle": "Welcome Message from the PRDC 2015 General Co-Chairs", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "9376z010", "keywords": [], "authors": [], "idPrefix": "prdc", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-11-01T00:00:00", "pubType": "proceedings", "pages": "x-x", "year": "2015", "issn": null, "isbn": "978-1-4673-9376-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9376ztoc", "articleId": "12OmNAle6GB", "__typename": "AdjacentArticleType" }, "next": { "fno": "9376z011", "articleId": "12OmNrJAefK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ichi/2017/4881/0/4881z013", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2017/4881z013/12OmNrIrPub", "parentPublication": { "id": "proceedings/ichi/2017/4881/0", "title": "2017 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prdc/2015/9376/0/9376z011", "title": "Welcome Message from the PRDC 2015 Program Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/prdc/2015/9376z011/12OmNrJAefK", "parentPublication": { "id": "proceedings/prdc/2015/9376/0", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing (PRDC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2016/8779/0/07456495", "title": "General chairs welcome", "doi": null, "abstractUrl": "/proceedings-article/percom/2016/07456495/12OmNxFaLtj", "parentPublication": { "id": "proceedings/percom/2016/8779/0", "title": "2016 IEEE International Conference on Pervasive Computing and Communications (PerCom)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2014/5701/0/5701z012", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2014/5701z012/12OmNy50giy", "parentPublication": { "id": "proceedings/ichi/2014/5701/0", "title": "2014 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2015/9548/0/9548z015", "title": "Welcome from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2015/9548z015/12OmNyL0ThT", "parentPublication": { "id": "proceedings/ichi/2015/9548/0", "title": "2015 International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icess/2016/3727/0/3727z009", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icess/2016/3727z009/12OmNyRxFnu", "parentPublication": { "id": "proceedings/icess/2016/3727/0", "title": "2016 13th International Conference on Embedded Software and Systems (ICESS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rethics/2020/8350/0/835000z006", "title": "Welcome from the Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/rethics/2020/835000z006/1nYsPozr5QY", "parentPublication": { "id": "proceedings/rethics/2020/8350/0", "title": "2020 1st Workshop on Ethics in 
Requirements Engineering Research and Practice (REthics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2021/0418/0/09439132", "title": "Welcome from the General Chairs", "doi": null, "abstractUrl": "/proceedings-article/percom/2021/09439132/1tTtzPmM252", "parentPublication": { "id": "proceedings/percom/2021/0418/0", "title": "2021 IEEE International Conference on Pervasive Computing and Communications (PerCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2021/4121/0/412100z022", "title": "Welcome from the CBMS 2021 General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/cbms/2021/412100z022/1vb8OcbjEWY", "parentPublication": { "id": "proceedings/cbms/2021/4121/0", "title": "2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCb3frA", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "acronym": "cluster", "groupId": "1000095", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNyKJii3", "doi": "10.1109/CLUSTER.2015.5", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "normalizedTitle": "CLUSTER 2015 General Co-Chairs Welcome Message", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "6598z018", "keywords": [], "authors": [], "idPrefix": "cluster", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-09-01T00:00:00", "pubType": "proceedings", "pages": "xviii-xx", "year": "2015", "issn": null, "isbn": "978-1-4673-6598-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6598ztoc", "articleId": "12OmNzlUKfT", "__typename": "AdjacentArticleType" }, "next": { "fno": "6598z021", "articleId": "12OmNxFaLus", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ccgrid/2015/8006/0/8006z019", "title": "Message from the CCGrid 2015 General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2015/8006z019/12OmNBTJICl", "parentPublication": { "id": "proceedings/ccgrid/2015/8006/0", "title": "2015 15th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2017/4881/0/4881z013", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2017/4881z013/12OmNrIrPub", "parentPublication": { "id": "proceedings/ichi/2017/4881/0", "title": "2017 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prdc/2015/9376/0/9376z011", "title": "Welcome Message from the PRDC 2015 Program Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/prdc/2015/9376z011/12OmNrJAefK", "parentPublication": { "id": "proceedings/prdc/2015/9376/0", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing 
(PRDC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/prdc/2015/9376/0/9376z010", "title": "Welcome Message from the PRDC 2015 General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/prdc/2015/9376z010/12OmNwwuDRT", "parentPublication": { "id": "proceedings/prdc/2015/9376/0", "title": "2015 IEEE 21st Pacific Rim International Symposium on Dependable Computing (PRDC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2014/5701/0/5701z012", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2014/5701z012/12OmNy50giy", "parentPublication": { "id": "proceedings/ichi/2014/5701/0", "title": "2014 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2015/9548/0/9548z015", "title": "Welcome from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/ichi/2015/9548z015/12OmNyL0ThT", "parentPublication": { "id": "proceedings/ichi/2015/9548/0", "title": "2015 International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icess/2016/3727/0/3727z009", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/icess/2016/3727z009/12OmNyRxFnu", "parentPublication": { "id": "proceedings/icess/2016/3727/0", "title": "2016 13th International Conference on Embedded Software and Systems (ICESS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2015/8828/0/8828z015", "title": "Welcome Message from the ITNG 2015 General Chair", "doi": null, "abstractUrl": "/proceedings-article/itng/2015/8828z015/12OmNyqRnk8", "parentPublication": { "id": "proceedings/itng/2015/8828/0", 
"title": "2015 12th International Conference on Information Technology - New Generations (ITNG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbd/2015/8537/0/8537z010", "title": "Welcome Message from the General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/cbd/2015/8537z010/12OmNzn38JY", "parentPublication": { "id": "proceedings/cbd/2015/8537/0", "title": "2015 Third International Conference on Advanced Cloud and Big Data (CBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2019/4734/0/08891036", "title": "CLUSTER 2019 Message from General Co-Chairs", "doi": null, "abstractUrl": "/proceedings-article/cluster/2019/08891036/1eLylXAGrMA", "parentPublication": { "id": "proceedings/cluster/2019/4734/0", "title": "2019 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvmowTr", "title": "2015 12th International Conference on Information Technology - New Generations (ITNG)", "acronym": "itng", "groupId": "1001685", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNyqRnk8", "doi": "10.1109/ITNG.2015.5", "title": "Welcome Message from the ITNG 2015 General Chair", "normalizedTitle": "Welcome Message from the ITNG 2015 General Chair", "abstract": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory welcome message from the conference proceedings. May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory welcome message from the conference proceedings. 
May include the conference officers' congratulations to all involved with the conference event and publication of the proceedings record.", "fno": "8828z015", "keywords": [], "authors": [], "idPrefix": "itng", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-04-01T00:00:00", "pubType": "proceedings", "pages": "xv-xv", "year": "2015", "issn": null, "isbn": "978-1-4799-8828-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "8828ztoc", "articleId": "12OmNwBjP6z", "__typename": "AdjacentArticleType" }, "next": { "fno": "8828z016", "articleId": "12OmNqFJhI9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sbesc/2017/3590/0/3590z009", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/sbesc/2017/3590z009/12OmNAoDhWQ", "parentPublication": { "id": "proceedings/sbesc/2017/3590/0", "title": "2017 VII Brazilian Symposium on Computing Systems Engineering (SBESC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icac/2016/1654/0/07573099", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/icac/2016/07573099/12OmNC2xhBC", "parentPublication": { "id": "proceedings/icac/2016/1654/0", "title": "2016 IEEE International Conference on Autonomic Computing (ICAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pact/2017/6764/0/6764z010", "title": "Welcome Message from the General Chair and Program Chair", "doi": null, "abstractUrl": "/proceedings-article/pact/2017/6764z010/12OmNCvcLGP", "parentPublication": { "id": "proceedings/pact/2017/6764/0", "title": "2017 26th International Conference on Parallel Architectures and Compilation Techniques (PACT)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csma/2015/9166/0/9166z011", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/csma/2015/9166z011/12OmNvjyxzH", "parentPublication": { "id": "proceedings/csma/2015/9166/0", "title": "2015 International Conference on Computer Science and Mechanical Automation (CSMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2015/6598/0/6598z018", "title": "CLUSTER 2015 General Co-Chairs Welcome Message", "doi": null, "abstractUrl": "/proceedings-article/cluster/2015/6598z018/12OmNyKJii3", "parentPublication": { "id": "proceedings/cluster/2015/6598/0", "title": "2015 IEEE International Conference on Cluster Computing (CLUSTER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeae/2014/6756/0/6756z009", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/icmeae/2014/6756z009/12OmNyjtNKx", "parentPublication": { "id": "proceedings/icmeae/2014/6756/0", "title": "2014 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccd/2018/8477/0/847700z016", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/iccd/2018/847700z016/17D45WK5Aka", "parentPublication": { "id": "proceedings/iccd/2018/8477/0", "title": "2018 IEEE 36th International Conference on Computer Design (ICCD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/euros&p/2017/5762/0/5762z009", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/euros&p/2017/5762z009/1h5WVJfvTHy", "parentPublication": { "id": "proceedings/euros&p/2017/5762/0", 
"title": "2017 IEEE European Symposium on Security and Privacy (EuroS&P)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsecs-icocsim/2021/1407/0/140700z021", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/icsecs-icocsim/2021/140700z021/1wYlAXPZ9mw", "parentPublication": { "id": "proceedings/icsecs-icocsim/2021/1407/0", "title": "2021 International Conference on Software Engineering & Computer Systems and 4th International Conference on Computational Science and Information Management (ICSECS-ICOCSIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloud-summit/2021/2582/0/258200z007", "title": "Welcome Message from the General Chair", "doi": null, "abstractUrl": "/proceedings-article/cloud-summit/2021/258200z007/1zJmM7i0hz2", "parentPublication": { "id": "proceedings/cloud-summit/2021/2582/0", "title": "2021 IEEE Cloud Summit (Cloud Summit)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNCcKQu9", "doi": "10.1109/VR.2017.7892323", "title": "Texturing of augmented reality character based on colored drawing", "normalizedTitle": "Texturing of augmented reality character based on colored drawing", "abstract": "Coloring book can inspire imaginary and creativity of children. However, with the rapid development of digital devices and internet, traditional coloring book tends to be not attractive for children any more. Thus, we propose an idea of applying augmented reality technology to traditional coloring book. After children finish coloring characters in the printed coloring book, they can inspect their work using a mobile device. The drawing is detected and tracked so that the video stream is augmented with a 3D character textured according to their coloring. This is possible thanks to several novel technical contributions. We present a texture process that generates texture map for 3D augmented reality character from 2D colored drawing using a lookup map. Considering the movement of the mobile device and drawing, we give an efficient method to track the drawing surface.", "abstracts": [ { "abstractType": "Regular", "content": "Coloring book can inspire imaginary and creativity of children. However, with the rapid development of digital devices and internet, traditional coloring book tends to be not attractive for children any more. Thus, we propose an idea of applying augmented reality technology to traditional coloring book. After children finish coloring characters in the printed coloring book, they can inspect their work using a mobile device. The drawing is detected and tracked so that the video stream is augmented with a 3D character textured according to their coloring. 
This is possible thanks to several novel technical contributions. We present a texture process that generates texture map for 3D augmented reality character from 2D colored drawing using a lookup map. Considering the movement of the mobile device and drawing, we give an efficient method to track the drawing surface.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Coloring book can inspire imaginary and creativity of children. However, with the rapid development of digital devices and internet, traditional coloring book tends to be not attractive for children any more. Thus, we propose an idea of applying augmented reality technology to traditional coloring book. After children finish coloring characters in the printed coloring book, they can inspect their work using a mobile device. The drawing is detected and tracked so that the video stream is augmented with a 3D character textured according to their coloring. This is possible thanks to several novel technical contributions. We present a texture process that generates texture map for 3D augmented reality character from 2D colored drawing using a lookup map. 
Considering the movement of the mobile device and drawing, we give an efficient method to track the drawing surface.", "fno": "07892323", "keywords": [ "Three Dimensional Displays", "Augmented Reality", "Mobile Handsets", "Streaming Media", "Real Time Systems", "Solid Modeling", "Two Dimensional Displays", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities", "K 3 1 Computers And Education Computer Uses In Education Computer Assisted Instruction" ], "authors": [ { "affiliation": "Research Center of Digital Media Technology, Xiamen University", "fullName": "Hengheng Zhao", "givenName": "Hengheng", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "Research Center of Digital Media Technology, Xiamen University", "fullName": "Ping Huang", "givenName": "Ping", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Research Center of Digital Media Technology, Xiamen University", "fullName": "Junfeng Yao", "givenName": "Junfeng", "surname": "Yao", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "355-356", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892322", "articleId": "12OmNyVes5J", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892324", "articleId": "12OmNqJHFsX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2012/1204/0/06184168", "title": "An interactive augmented reality coloring book", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184168/12OmNAIMO6Y", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE 
Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icebe/2015/8002/0/8002a281", "title": "Applying Augmented Reality Technology to Book Publication Business", "doi": null, "abstractUrl": "/proceedings-article/icebe/2015/8002a281/12OmNAfy7JF", "parentPublication": { "id": "proceedings/icebe/2015/8002/0", "title": "2015 IEEE 12th International Conference on e-Business Engineering (ICEBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2011/2183/0/06162906", "title": "An interactive augmented reality coloring book", "doi": null, "abstractUrl": "/proceedings-article/ismar/2011/06162906/12OmNx6xHl0", "parentPublication": { "id": "proceedings/ismar/2011/2183/0", "title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892247", "title": "MagicToon: A 2D-to-3D creative cartoon modeling system with mobile AR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892247/12OmNxjjEhC", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a077", "title": "PPV: Pixel-Point-Volume Segmentation for Object Referencing in Collaborative Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a077/12OmNxy4N6P", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2014/3922/0/07044138", "title": "Desktop vs. 
mobile: A comparative study of augmented reality systems for engineering visualizations in education", "doi": null, "abstractUrl": "/proceedings-article/fie/2014/07044138/12OmNy3RRFl", "parentPublication": { "id": "proceedings/fie/2014/3922/0", "title": "2014 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/intetain/2015/0061/0/07325490", "title": "Drawing outside the lines: Tracking-based gesture interaction in mobile augmented entertainment", "doi": null, "abstractUrl": "/proceedings-article/intetain/2015/07325490/12OmNzayNem", "parentPublication": { "id": "proceedings/intetain/2015/0061/0", "title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/11/07165658", "title": "Live Texturing of Augmented Reality Characters from Colored Drawings", "doi": null, "abstractUrl": "/journal/tg/2015/11/07165658/13rRUxNW1Zq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09852696", "title": "CreatureShop: Interactive 3D Character Modeling and Texturing from a Single Color Drawing", "doi": null, "abstractUrl": "/journal/tg/5555/01/09852696/1FHlT4i4Pmw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a239", "title": "Using Marker Based Augmented Reality to teach autistic eating skills", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a239/1qpzADQ7pAY", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on 
Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwwMf3H", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "acronym": "ismarw", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNxdm4C4", "doi": "10.1109/ISMAR-Adjunct.2016.0046", "title": "TeachAR: An Interactive Augmented Reality Tool for Teaching Basic English to Non-Native Children", "normalizedTitle": "TeachAR: An Interactive Augmented Reality Tool for Teaching Basic English to Non-Native Children", "abstract": "Teaching English to children who do not come from an English speaking background is an interesting challenge for educators. In this paper, we present an Augmented reality (AR) tool, TeachAR, for teaching basic English words (colors, shapes, and prepositions) to children for whom English is not a native language. In a pilot study we compare our AR system to a traditional non-AR system. The results indicate a potentially better learning outcome using the TeachAR system than the traditional system. It also showed that children enjoyed using AR-based methods. However, it also showed a few usability issues with the TeachAR interface, which we will improve on in the future.", "abstracts": [ { "abstractType": "Regular", "content": "Teaching English to children who do not come from an English speaking background is an interesting challenge for educators. In this paper, we present an Augmented reality (AR) tool, TeachAR, for teaching basic English words (colors, shapes, and prepositions) to children for whom English is not a native language. In a pilot study we compare our AR system to a traditional non-AR system. The results indicate a potentially better learning outcome using the TeachAR system than the traditional system. It also showed that children enjoyed using AR-based methods. 
However, it also showed a few usability issues with the TeachAR interface, which we will improve on in the future.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Teaching English to children who do not come from an English speaking background is an interesting challenge for educators. In this paper, we present an Augmented reality (AR) tool, TeachAR, for teaching basic English words (colors, shapes, and prepositions) to children for whom English is not a native language. In a pilot study we compare our AR system to a traditional non-AR system. The results indicate a potentially better learning outcome using the TeachAR system than the traditional system. It also showed that children enjoyed using AR-based methods. However, it also showed a few usability issues with the TeachAR interface, which we will improve on in the future.", "fno": "07836467", "keywords": [ "Augmented Reality", "Computer Aided Instruction", "Graphical User Interfaces", "Human Computer Interaction", "Linguistics", "Teaching", "Usability Issues", "Teach AR Interface", "Learning Outcome", "Basic English Words Teaching", "AR Tool", "Nonnative Children", "Interactive Augmented Reality Tool", "Shape", "Education", "Image Color Analysis", "Speech", "Color", "Augmented Reality", "Games", "Augmented Reality", "Teaching And Learning", "English Language", "Children", "Non Native Speakers" ], "authors": [ { "affiliation": null, "fullName": "Che Samihah Che Dalim", "givenName": "Che Samihah Che", "surname": "Dalim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Arindam Dey", "givenName": "Arindam", "surname": "Dey", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Thammathip Piumsomboon", "givenName": "Thammathip", "surname": "Piumsomboon", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" }, { "affiliation": null, 
"fullName": "Shahrizal Sunar", "givenName": "Shahrizal", "surname": "Sunar", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismarw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "82-86", "year": "2016", "issn": null, "isbn": "978-1-5090-3740-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07836466", "articleId": "12OmNx7G5Tm", "__typename": "AdjacentArticleType" }, "next": { "fno": "07836468", "articleId": "12OmNzDehaq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismarw/2016/3740/0/07836534", "title": "TeachAR: An Interactive Augmented Reality Tool for Teaching Basic English to Non-native Children", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836534/12OmNB9t6ld", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2014/4261/0/4261a114", "title": "Music-AR: Augmented Reality in Teaching the Concept of Sound Loudness to Children in Pre-School", "doi": null, "abstractUrl": "/proceedings-article/svr/2014/4261a114/12OmNBOCWeh", "parentPublication": { "id": "proceedings/svr/2014/4261/0", "title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a890", "title": "An Augmented Reality 3D Pop-Up Book: The Development of a Multimedia Project for English Language Teaching", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a890/12OmNqN6R1b", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE 
International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402561", "title": "Using children's developmental psychology to guide augmented-reality design and usability", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402561/12OmNrIrPhx", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2013/5009/0/5009a439", "title": "VECAR: Virtual English Classroom with Markerless Augmented Reality and Intuitive Gesture Interaction", "doi": null, "abstractUrl": "/proceedings-article/icalt/2013/5009a439/12OmNwJPMVw", "parentPublication": { "id": "proceedings/icalt/2013/5009/0", "title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciss/2015/8611/0/07371036", "title": "The Effects of Learning Style on Mobile Augmented-Reality-Facilitated English Vocabulary Learning", "doi": null, "abstractUrl": "/proceedings-article/iciss/2015/07371036/12OmNzIUg42", "parentPublication": { "id": "proceedings/iciss/2015/8611/0", "title": "2015 2nd International Conference on Information Science and Security (ICISS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2022/9519/0/951900a256", "title": "Using Thematic English Learning and Augmented Reality to Enhance Vocabulary Learning Motivation and Enjoyment of Elementary School Students", "doi": null, "abstractUrl": "/proceedings-article/icalt/2022/951900a256/1FUUfSsF2es", "parentPublication": { "id": "proceedings/icalt/2022/9519/0", "title": "2022 International Conference on Advanced Learning Technologies (ICALT)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2022/8474/0/847400a789", "title": "Research on visual art design of children’s picture books based on Augmented Reality Technology", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2022/847400a789/1IlO6tD7Ntu", "parentPublication": { "id": "proceedings/aemcse/2022/8474/0", "title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864548", "title": "The Usability of the Microsoft HoloLens for an Augmented Reality Game to Teach Elementary School Children", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864548/1e5ZpUVkjVS", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cipae/2021/2665/0/266500a018", "title": "Research on the Application of Augmented Reality Technology in College English Teaching", "doi": null, "abstractUrl": "/proceedings-article/cipae/2021/266500a018/1yQAUh3DQm4", "parentPublication": { "id": "proceedings/cipae/2021/2665/0", "title": "2021 International Conference on Computers, Information Processing and Advanced Education (CIPAE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNyVes5J", "doi": "10.1109/VR.2017.7892322", "title": "Real-time interactive AR system for broadcasting", "normalizedTitle": "Real-time interactive AR system for broadcasting", "abstract": "For live television broadcast such as the educational program for children conducted through viewer participation, the smooth integration of virtual contents and the interaction between the casts and them are quite important issues. Recently there have been many attempts to make aggressive use of interactive virtual contents in live broadcast due to the advancement of AR/VR technology and virtual studio technology. These previous works have many limitations that do not support real-time 3D space recognition or immersive interaction. In this sense, we propose an augmented reality based real-time broadcasting system which perceives the indoor space using a broadcasting camera and a RGB-D camera. Also, the system can support the real-time interaction between the augmented virtual contents and the casts. The contribution of this work is the development of a new augmented reality based broadcasting system that not only enables filming using compatible interactive 3D contents in live broadcast but also drastically reduces the production costs. For the practical use, the proposed system was demonstrated in the actual broadcast program called “Ding Dong Dang Kindergarten” which is a representative children educational program on the national broadcasting channel of Korea.", "abstracts": [ { "abstractType": "Regular", "content": "For live television broadcast such as the educational program for children conducted through viewer participation, the smooth integration of virtual contents and the interaction between the casts and them are quite important issues. 
Recently there have been many attempts to make aggressive use of interactive virtual contents in live broadcast due to the advancement of AR/VR technology and virtual studio technology. These previous works have many limitations that do not support real-time 3D space recognition or immersive interaction. In this sense, we propose an augmented reality based real-time broadcasting system which perceives the indoor space using a broadcasting camera and a RGB-D camera. Also, the system can support the real-time interaction between the augmented virtual contents and the casts. The contribution of this work is the development of a new augmented reality based broadcasting system that not only enables filming using compatible interactive 3D contents in live broadcast but also drastically reduces the production costs. For the practical use, the proposed system was demonstrated in the actual broadcast program called “Ding Dong Dang Kindergarten” which is a representative children educational program on the national broadcasting channel of Korea.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For live television broadcast such as the educational program for children conducted through viewer participation, the smooth integration of virtual contents and the interaction between the casts and them are quite important issues. Recently there have been many attempts to make aggressive use of interactive virtual contents in live broadcast due to the advancement of AR/VR technology and virtual studio technology. These previous works have many limitations that do not support real-time 3D space recognition or immersive interaction. In this sense, we propose an augmented reality based real-time broadcasting system which perceives the indoor space using a broadcasting camera and a RGB-D camera. Also, the system can support the real-time interaction between the augmented virtual contents and the casts. 
The contribution of this work is the development of a new augmented reality based broadcasting system that not only enables filming using compatible interactive 3D contents in live broadcast but also drastically reduces the production costs. For the practical use, the proposed system was demonstrated in the actual broadcast program called “Ding Dong Dang Kindergarten” which is a representative children educational program on the national broadcasting channel of Korea.", "fno": "07892322", "keywords": [ "Three Dimensional Displays", "Broadcasting", "Cameras", "Real Time Systems", "Augmented Reality", "Image Recognition", "Educational Programs", "AR Broadcasting", "Mesh Reconstruction", "Indoor Structure" ], "authors": [ { "affiliation": "ETRI, Rep. of Korea", "fullName": "Hyunwoo Cho", "givenName": "Hyunwoo", "surname": "Cho", "__typename": "ArticleAuthorType" }, { "affiliation": "ETRI, Rep. of Korea", "fullName": "Sung-Uk Jung", "givenName": "Sung-Uk", "surname": "Jung", "__typename": "ArticleAuthorType" }, { "affiliation": "ETRI, Rep. 
of Korea", "fullName": "Hyung-Keun Jee", "givenName": "Hyung-Keun", "surname": "Jee", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "353-354", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892321", "articleId": "12OmNzZWbHq", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892323", "articleId": "12OmNCcKQu9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icce/2009/2558/0/05012342", "title": "DIVE : A P2P-based personal broadcasting system for consumer electronics", "doi": null, "abstractUrl": "/proceedings-article/icce/2009/05012342/12OmNAQrYDh", "parentPublication": { "id": "proceedings/icce/2009/2558/0", "title": "2009 Digest of Technical Papers International Conference on Consumer Electronics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2002/1760/0/17600410", "title": "Adaptive Live Broadcasting for Highly-Demanded Videos", "doi": null, "abstractUrl": "/proceedings-article/icpads/2002/17600410/12OmNAWpyub", "parentPublication": { "id": "proceedings/icpads/2002/1760/0", "title": "Proceedings of the Ninth International Conference on Parallel and Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nbis/2013/2510/0/2510a332", "title": "A Scheduling Method for Division Based Broadcasting Considering Dynamic Update", "doi": null, "abstractUrl": "/proceedings-article/nbis/2013/2510a332/12OmNAndilo", "parentPublication": { "id": "proceedings/nbis/2013/2510/0", "title": "2013 16th International Conference on Network-Based Information 
Systems (NBiS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2015/0379/0/0379a403", "title": "On Influencing Mobile Live Video Broadcasting Users", "doi": null, "abstractUrl": "/proceedings-article/ism/2015/0379a403/12OmNvJXeCo", "parentPublication": { "id": "proceedings/ism/2015/0379/0", "title": "2015 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nbis/2012/4779/0/4779a453", "title": "A Scheduling Scheme for Improving Error Resilience on Media Data Broadcasting", "doi": null, "abstractUrl": "/proceedings-article/nbis/2012/4779a453/12OmNwcCIVn", "parentPublication": { "id": "proceedings/nbis/2012/4779/0", "title": "2012 15th International Conference on Network-Based Information Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2017/6029/0/07920892", "title": "Different Worlds Broadcasting: A Distributed Internet Live Broadcasting System with Video and Audio Effects", "doi": null, "abstractUrl": "/proceedings-article/aina/2017/07920892/12OmNwoPtnq", "parentPublication": { "id": "proceedings/aina/2017/6029/0", "title": "2017 IEEE 31st International Conference on Advanced Information Networking and Applications (AINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nbis/2016/0979/0/0979a311", "title": "A Distributed Video Processing System for Internet Live Broadcasting Services", "doi": null, "abstractUrl": "/proceedings-article/nbis/2016/0979a311/12OmNxaNGkM", "parentPublication": { "id": "proceedings/nbis/2016/0979/0", "title": "2016 19th International Conference on Network-Based Information Systems (NBiS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a914", "title": "Effective Spatial Data Broadcasting", 
"doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a914/12OmNyxXltD", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itme/2019/3918/0/391800a336", "title": "Current Situation Analysis and Development Strategy Research on Educational Live Broadcasting Technology Based on SWOT Theory in Post-MOOC Era", "doi": null, "abstractUrl": "/proceedings-article/itme/2019/391800a336/1gRxehPeDfy", "parentPublication": { "id": "proceedings/itme/2019/3918/0", "title": "2019 10th International Conference on Information Technology in Medicine and Education (ITME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2021/2463/0/246300b423", "title": "Same World Broadcasting: An Internet Broadcasting System for Real-Time Distributed Video Compositions", "doi": null, "abstractUrl": "/proceedings-article/compsac/2021/246300b423/1wLcq89zMOI", "parentPublication": { "id": "proceedings/compsac/2021/2463/0", "title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAXxXaK", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNzXWZKe", "doi": "10.1109/ICCV.2017.25", "title": "Colored Point Cloud Registration Revisited", "normalizedTitle": "Colored Point Cloud Registration Revisited", "abstract": "We present an algorithm for aligning two colored point clouds. The key idea is to optimize a joint photometric and geometric objective that locks the alignment along both the normal direction and the tangent plane. We extend a photometric objective for aligning RGB-D images to point clouds, by locally parameterizing the point cloud with a virtual camera. Experiments demonstrate that our algorithm is more accurate and more robust than prior point cloud registration algorithms, including those that utilize color information. We use the presented algorithms to enhance a state-of-the-art scene reconstruction system. The precision of the resulting system is demonstrated on real-world scenes with accurate ground-truth models.", "abstracts": [ { "abstractType": "Regular", "content": "We present an algorithm for aligning two colored point clouds. The key idea is to optimize a joint photometric and geometric objective that locks the alignment along both the normal direction and the tangent plane. We extend a photometric objective for aligning RGB-D images to point clouds, by locally parameterizing the point cloud with a virtual camera. Experiments demonstrate that our algorithm is more accurate and more robust than prior point cloud registration algorithms, including those that utilize color information. We use the presented algorithms to enhance a state-of-the-art scene reconstruction system. 
The precision of the resulting system is demonstrated on real-world scenes with accurate ground-truth models.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an algorithm for aligning two colored point clouds. The key idea is to optimize a joint photometric and geometric objective that locks the alignment along both the normal direction and the tangent plane. We extend a photometric objective for aligning RGB-D images to point clouds, by locally parameterizing the point cloud with a virtual camera. Experiments demonstrate that our algorithm is more accurate and more robust than prior point cloud registration algorithms, including those that utilize color information. We use the presented algorithms to enhance a state-of-the-art scene reconstruction system. The precision of the resulting system is demonstrated on real-world scenes with accurate ground-truth models.", "fno": "1032a143", "keywords": [ "Cameras", "Geometry", "Image Colour Analysis", "Image Reconstruction", "Image Registration", "Optimisation", "Tangent Plane", "Prior Point Cloud Registration Algorithms", "Color Information", "Geometric Objective", "Colored Point Cloud Registration", "RGB D Images Aligning", "State Of The Art Scene Reconstruction System", "Photometric Objective Optimization", "Three Dimensional Displays", "Image Color Analysis", "Cameras", "Optimization", "Iterative Closest Point Algorithm", "Image Reconstruction", "Image Registration" ], "authors": [ { "affiliation": null, "fullName": "Jaesik Park", "givenName": "Jaesik", "surname": "Park", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Qian-Yi Zhou", "givenName": "Qian-Yi", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Vladlen Koltun", "givenName": "Vladlen", "surname": "Koltun", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, 
"pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "143-152", "year": "2017", "issn": "2380-7504", "isbn": "978-1-5386-1032-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "1032a133", "articleId": "12OmNvIxeVC", "__typename": "AdjacentArticleType" }, "next": { "fno": "1032a153", "articleId": "12OmNyS6Rzc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmip/2017/5954/0/5954a058", "title": "A Log-Polar Feature Guided Iterative Closest Point Method for Image Registration", "doi": null, "abstractUrl": "/proceedings-article/icmip/2017/5954a058/12OmNBvkdlJ", "parentPublication": { "id": "proceedings/icmip/2017/5954/0", "title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccis/2010/4270/0/4270a565", "title": "An Improved ICP Algorithm for Point Cloud Registration", "doi": null, "abstractUrl": "/proceedings-article/iccis/2010/4270a565/12OmNx5YvkB", "parentPublication": { "id": "proceedings/iccis/2010/4270/0", "title": "2010 International Conference on Computational and Information Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a232", "title": "Efficient Large-Scale Point Cloud Registration Using Loop Closures", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a232/12OmNxA3YSC", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2001/02/i0165", "title": "Robust Point Correspondence Applied to Two-and Three-Dimensional Image Registration", "doi": null, "abstractUrl": 
"/journal/tp/2001/02/i0165/13rRUNvyaga", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c993", "title": "Inverse Composition Discriminative Optimization for Point Cloud Registration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c993/17D45XERmmv", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200n3249", "title": "Provably Approximated Point Cloud Registration", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200n3249/1BmHRpR72Cc", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiam/2021/1732/0/173200a407", "title": "An Improved ICP Point Cloud Registration Algorithm Based on Three-Points Congruent Sets", "doi": null, "abstractUrl": "/proceedings-article/aiam/2021/173200a407/1BzTJDeh3Ms", "parentPublication": { "id": "proceedings/aiam/2021/1732/0", "title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g646", "title": "Multimodal Colored Point Cloud to Image Alignment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g646/1H0KN0OXgGc", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/fcsit/2022/6353/0/635300a046", "title": "3D Point Cloud Coarse Registration Algorithm Based on Center of Gravity and Centroid Transformation", "doi": null, "abstractUrl": "/proceedings-article/fcsit/2022/635300a046/1Ml2cP3m9YQ", "parentPublication": { "id": "proceedings/fcsit/2022/6353/0", "title": "2022 Euro-Asia Conference on Frontiers of Computer Science and Information Technology (FCSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2019/6092/0/609200a132", "title": "Point Cloud Registration Algorithm Based on Combination of NDT and PLICP", "doi": null, "abstractUrl": "/proceedings-article/cis/2019/609200a132/1i5m6DnRNsc", "parentPublication": { "id": "proceedings/cis/2019/6092/0", "title": "2019 15th International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyqRn7h", "title": "Proceedings. International Symposium on Mixed and Augmented Reality", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNzuIjk2", "doi": "10.1109/ISMAR.2002.1115062", "title": "3D Live: Real Time Captured Content for Mixed Reality", "normalizedTitle": "3D Live: Real Time Captured Content for Mixed Reality", "abstract": "We present a complete system for live capture of 3-D content and simultaneous presentation in augmented reality. The user sees the real world from his viewpoint, but modified so that the image of a remote collaborator is rendered into the scene. Fifteen cameras surround the collaborator, and the resulting video streams are used to construct a three-dimensional model of the subject using a shape-from-silhouette algorithm. Users view a two-dimensional fiducial marker using a video-see-through augmented reality interface. The geometric relationship between the marker and head-mounted camera is calculated, and the equivalent view of the subject is computed and drawn into the scene. Our system can generate 384 ? 288 pixel images of the models at 25 fps, with a latency of <100ms. The result gives the strong impression that the subject is a real part of the 3-D scene. We demonstrate applications of this system in 3-D video-conferencing and entertainment.", "abstracts": [ { "abstractType": "Regular", "content": "We present a complete system for live capture of 3-D content and simultaneous presentation in augmented reality. The user sees the real world from his viewpoint, but modified so that the image of a remote collaborator is rendered into the scene. Fifteen cameras surround the collaborator, and the resulting video streams are used to construct a three-dimensional model of the subject using a shape-from-silhouette algorithm. 
Users view a two-dimensional fiducial marker using a video-see-through augmented reality interface. The geometric relationship between the marker and head-mounted camera is calculated, and the equivalent view of the subject is computed and drawn into the scene. Our system can generate 384 ? 288 pixel images of the models at 25 fps, with a latency of <100ms. The result gives the strong impression that the subject is a real part of the 3-D scene. We demonstrate applications of this system in 3-D video-conferencing and entertainment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a complete system for live capture of 3-D content and simultaneous presentation in augmented reality. The user sees the real world from his viewpoint, but modified so that the image of a remote collaborator is rendered into the scene. Fifteen cameras surround the collaborator, and the resulting video streams are used to construct a three-dimensional model of the subject using a shape-from-silhouette algorithm. Users view a two-dimensional fiducial marker using a video-see-through augmented reality interface. The geometric relationship between the marker and head-mounted camera is calculated, and the equivalent view of the subject is computed and drawn into the scene. Our system can generate 384 ? 288 pixel images of the models at 25 fps, with a latency of <100ms. The result gives the strong impression that the subject is a real part of the 3-D scene. 
We demonstrate applications of this system in 3-D video-conferencing and entertainment.", "fno": "17810007", "keywords": [ "Mixed Reality", "Augmented Reality", "Shape From Silhouette", "3 D Reconstruction", "Video Conferencing" ], "authors": [ { "affiliation": "National University of Singapore", "fullName": "Simon Prince", "givenName": "Simon", "surname": "Prince", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Singapore", "fullName": "Adrian David Cheok", "givenName": "Adrian David", "surname": "Cheok", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Singapore", "fullName": "Farzam Farbiz", "givenName": "Farzam", "surname": "Farbiz", "__typename": "ArticleAuthorType" }, { "affiliation": "Zaxel Systems", "fullName": "Todd Williamson", "givenName": "Todd", "surname": "Williamson", "__typename": "ArticleAuthorType" }, { "affiliation": "Zaxel Systems", "fullName": "Nik Johnson", "givenName": "Nik", "surname": "Johnson", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Washington", "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" }, { "affiliation": "Hiroshima City University", "fullName": "Hirokazu Kato", "givenName": "Hirokazu", "surname": "Kato", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-09-01T00:00:00", "pubType": "proceedings", "pages": "7", "year": "2002", "issn": null, "isbn": "0-7695-1781-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "17810003", "articleId": "12OmNwNeYx2", "__typename": "AdjacentArticleType" }, "next": { "fno": "17810014", "articleId": "12OmNAS9zAu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/wmsvm/2010/7077/0/05558353", "title": "An Extended Marker-Based Tracking System for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/wmsvm/2010/05558353/12OmNAiFI7k", "parentPublication": { "id": "proceedings/wmsvm/2010/7077/0", "title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2009/3651/0/3651a148", "title": "It's All Done with Mirrors: Calibration-and-Correspondence-Free 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/crv/2009/3651a148/12OmNC943Ga", "parentPublication": { "id": "proceedings/crv/2009/3651/0", "title": "2009 Canadian Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2013/5050/0/5050a716", "title": "DR-Marker: A Novel Diminishing-Reality-Based AR Registration", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a716/12OmNqG0SYN", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2002/1781/0/17810317", "title": "3D Live: Real Time Captured Content for Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2002/17810317/12OmNvTjZSz", "parentPublication": { "id": "proceedings/ismar/2002/1781/0", "title": "Proceedings. 
International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836528", "title": "Mixed Reality Extended TV", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836528/12OmNx7ouOs", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802045", "title": "Transitional Augmented Reality navigation for live captured scenes", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802045/12OmNyRPgKH", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/06/v0706", "title": "Real-Time 3D Human Capture System for Mixed-Reality Art and Entertainment", "doi": null, "abstractUrl": "/journal/tg/2005/06/v0706/13rRUxbTMyI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a798", "title": "Moving Soon? 
Rearranging Furniture using Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a798/1CJcJw3fs2s", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798018", "title": "Virtual Agent Positioning Driven by Scene Semantics in Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798018/1cJ0M7OmS2s", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797966", "title": "A Mixed Presence Collaborative Mixed Reality System", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797966/1cJ19fldjVu", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgX3", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNAS9zxR", "doi": "10.1109/CVPRW.2009.5204318", "title": "Multi-view reconstruction for projector camera systems based on bundle adjustment", "normalizedTitle": "Multi-view reconstruction for projector camera systems based on bundle adjustment", "abstract": "Range scanners using projector-camera systems have been studied actively in recent years as methods for measuring 3D shapes accurately and cost-effectively. To acquire an entire 3D shape of an object with such systems, the shape of the object should be captured from multiple directions and the set of captured shapes should be aligned using algorithms such as ICPs. Then, the aligned shapes are integrated into a single 3D shape model. However, the captured shapes are often distorted due to errors of intrinsic or extrinsic parameters of the camera and the projector. Because of these distortions, gaps between overlapped surfaces remain even after aligning the 3D shapes. In this paper, we propose a new method to capture an entire shape with high precision using an active stereo range scanner which consists of a projector and a camera with fixed relative positions. In the proposed method, minimization of calibration errors of the projector-camera pair and registration errors between 3D shapes from different viewpoints are simultaneously achieved. The proposed method can be considered as a variation of bundle adjustment techniques adapted to projector-camera systems. 
Since acquisition of correspondences between different views is not easy for projector-camera systems, a solution for the problem is also presented.", "abstracts": [ { "abstractType": "Regular", "content": "Range scanners using projector-camera systems have been studied actively in recent years as methods for measuring 3D shapes accurately and cost-effectively. To acquire an entire 3D shape of an object with such systems, the shape of the object should be captured from multiple directions and the set of captured shapes should be aligned using algorithms such as ICPs. Then, the aligned shapes are integrated into a single 3D shape model. However, the captured shapes are often distorted due to errors of intrinsic or extrinsic parameters of the camera and the projector. Because of these distortions, gaps between overlapped surfaces remain even after aligning the 3D shapes. In this paper, we propose a new method to capture an entire shape with high precision using an active stereo range scanner which consists of a projector and a camera with fixed relative positions. In the proposed method, minimization of calibration errors of the projector-camera pair and registration errors between 3D shapes from different viewpoints are simultaneously achieved. The proposed method can be considered as a variation of bundle adjustment techniques adapted to projector-camera systems. Since acquisition of correspondences between different views is not easy for projector-camera systems, a solution for the problem is also presented.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Range scanners using projector-camera systems have been studied actively in recent years as methods for measuring 3D shapes accurately and cost-effectively. To acquire an entire 3D shape of an object with such systems, the shape of the object should be captured from multiple directions and the set of captured shapes should be aligned using algorithms such as ICPs. 
Then, the aligned shapes are integrated into a single 3D shape model. However, the captured shapes are often distorted due to errors of intrinsic or extrinsic parameters of the camera and the projector. Because of these distortions, gaps between overlapped surfaces remain even after aligning the 3D shapes. In this paper, we propose a new method to capture an entire shape with high precision using an active stereo range scanner which consists of a projector and a camera with fixed relative positions. In the proposed method, minimization of calibration errors of the projector-camera pair and registration errors between 3D shapes from different viewpoints are simultaneously achieved. The proposed method can be considered as a variation of bundle adjustment techniques adapted to projector-camera systems. Since acquisition of correspondences between different views is not easy for projector-camera systems, a solution for the problem is also presented.", "fno": "05204318", "keywords": [ "Calibration", "Cameras", "Image Reconstruction", "Optical Projectors", "Stereo Image Processing", "Multiview Reconstruction", "Projector Camera Systems", "Bundle Adjustment", "3 D Shape Alignment", "Stereo Range Scanner", "Calibration Errors", "Cameras", "Shape Measurement", "Iterative Algorithms", "Iterative Closest Point Algorithm", "Calibration", "Layout", "Cities And Towns", "Minimization Methods", "Surface Reconstruction", "Parameter Estimation" ], "authors": [ { "affiliation": "Faculty of Information Sciences, Hiroshima City Univ., Japan", "fullName": "Ryo Furuakwa", "givenName": "Ryo", "surname": "Furuakwa", "__typename": "ArticleAuthorType" }, { "affiliation": "Faculty of Engineering, Saitama Univ., Japan", "fullName": "Kenji Inose", "givenName": "Kenji", "surname": "Inose", "__typename": "ArticleAuthorType" }, { "affiliation": "Faculty of Engineering, Saitama Univ., Japan", "fullName": "Hiroshi Kawasaki", "givenName": "Hiroshi", "surname": "Kawasaki", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-06-01T00:00:00", "pubType": "proceedings", "pages": "69-76", "year": "2009", "issn": "2160-7508", "isbn": "978-1-4244-3994-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05204317", "articleId": "12OmNCxtyKC", "__typename": "AdjacentArticleType" }, "next": { "fno": "05204319", "articleId": "12OmNwE9OQL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dimpvt/2011/4369/0/4369a049", "title": "A System for Capturing Textured 3D Shapes Based on One-Shot Grid Pattern with Multi-band Camera and Infrared Projector", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2011/4369a049/12OmNA0dMTy", "parentPublication": { "id": "proceedings/3dimpvt/2011/4369/0", "title": "2011 International Conference on 3D Imaging, Modeling, Processing, Visualization and Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761601", "title": "Calibration of projector-camera systems from virtual mutual projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118b486", "title": "Photometric Bundle Adjustment for Dense Multi-view 3D Modeling", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b486/12OmNBp52wC", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/3dim/2003/1991/0/19910491", "title": "Interactive Shape Acquisition using Marker Attached Laser Projector", "doi": null, "abstractUrl": "/proceedings-article/3dim/2003/19910491/12OmNvq5jyo", "parentPublication": { "id": "proceedings/3dim/2003/1991/0", "title": "3D Digital Imaging and Modeling, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270475", "title": "Projector Calibration using Arbitrary Planes and Calibrated Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270475/12OmNxYtu7r", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2014/4308/0/4308a449", "title": "Projection Center Calibration for a Co-located Projector Camera System", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543474", "title": "Camera-projector matching using an unstructured video stream", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543474/12OmNzUxO57", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a453", "title": "Simultaneous Shape Registration and Active Stereo Shape Reconstruction using Modified Bundle Adjustment", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a453/1ezRD0dXDhe", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNA0dMLZ", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001", "acronym": "cvpr", "groupId": "1000147", "volume": "2", "displayVolume": "3", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNB8Cj43", "doi": "10.1109/CVPR.2001.991004", "title": "A Self-Correcting Projector", "normalizedTitle": "A Self-Correcting Projector", "abstract": "We describe a calibration and rendering technique for a projector that can render rectangular images under keystoned position. The projector utilizes a rigidly attached camera to form a stereo pair. We describe a very easy to use technique for calibration of the projector-camera pair using only black planar surfaces. We present an efficient rendering method to pre-warp images so that they appear correctly on the screen, and show experimental results.", "abstracts": [ { "abstractType": "Regular", "content": "We describe a calibration and rendering technique for a projector that can render rectangular images under keystoned position. The projector utilizes a rigidly attached camera to form a stereo pair. We describe a very easy to use technique for calibration of the projector-camera pair using only black planar surfaces. We present an efficient rendering method to pre-warp images so that they appear correctly on the screen, and show experimental results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe a calibration and rendering technique for a projector that can render rectangular images under keystoned position. The projector utilizes a rigidly attached camera to form a stereo pair. We describe a very easy to use technique for calibration of the projector-camera pair using only black planar surfaces. 
We present an efficient rendering method to pre-warp images so that they appear correctly on the screen, and show experimental results.", "fno": "127220504", "keywords": [ "Projector", "Camera", "Calibration", "Homography" ], "authors": [ { "affiliation": "Mitsubishi Electric Research Laboratories (MERL)", "fullName": "Ramesh Raskar", "givenName": "Ramesh", "surname": "Raskar", "__typename": "ArticleAuthorType" }, { "affiliation": "Mitsubishi Electric Research Laboratories (MERL)", "fullName": "Paul Beardsley", "givenName": "Paul", "surname": "Beardsley", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-12-01T00:00:00", "pubType": "proceedings", "pages": "504", "year": "2001", "issn": "1063-6919", "isbn": "0-7695-1272-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "127220498", "articleId": "12OmNyo1nNt", "__typename": "AdjacentArticleType" }, "next": { "fno": "127220509", "articleId": "12OmNBU1jOd", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2011/0529/0/05981781", "title": "Simultaneous self-calibration of a projector and a camera using structured light", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvmp/2009/3893/0/3893a118", "title": "A Portable Projector Extended for Object-Centered Real-Time Interactions", "doi": null, "abstractUrl": "/proceedings-article/cvmp/2009/3893a118/12OmNC8MsyT", "parentPublication": { "id": "proceedings/cvmp/2009/3893/0", "title": "2009 Conference for Visual Media Production", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2012/4608/0/4608b285", "title": "Research of Color Correction Algorithm for Multi-projector Screen Based on Projector-Camera System", "doi": null, "abstractUrl": "/proceedings-article/isdea/2012/4608b285/12OmNxwENpp", "parentPublication": { "id": "proceedings/isdea/2012/4608/0", "title": "2012 Second International Conference on Intelligent System Design and Engineering Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315159", "title": "A flexible projector-camera system for multi-planar displays", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315159/12OmNzBwGyN", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccet/2009/3521/1/3521a462", "title": "A Novel Binary Code Based Projector-Camera System Registration Method", "doi": null, "abstractUrl": "/proceedings-article/iccet/2009/3521a462/12OmNzYwcew", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2012/4836/0/4836a007", "title": "Real-time Continuous Geometric Calibration for Projector-Camera System under Ambient Illumination", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2012/4836a007/12OmNzahc85", "parentPublication": { "id": "proceedings/icvrv/2012/4836/0", "title": "2012 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2005/12/i1845", "title": "Autocalibration of a Projector-Camera System", "doi": null, "abstractUrl": "/journal/tp/2005/12/i1845/13rRUxASuiM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/10/ttp2008101831", "title": "Robust and Accurate Visual Echo Cancelation in a Full-duplex Projector-Camera System", "doi": null, "abstractUrl": "/journal/tp/2008/10/ttp2008101831/13rRUxjQyip", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine 
Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpZ", "title": "2015 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBJw9RK", "doi": "10.1109/VR.2015.7223400", "title": "Semi-automatic calibration of a projector-camera system using arbitrary objects with known geometry", "normalizedTitle": "Semi-automatic calibration of a projector-camera system using arbitrary objects with known geometry", "abstract": "We propose a new semi-automatic calibration approach for projector-camera systems that — unlike existing auto-calibration approaches — additionally recovers the necessary global scale by projecting on an arbitrary object of known geometry from one view. Our method therefore combines surface registration with bundle adjustment optimization on points reconstructed from structured light projections. In simulations on virtual data and experiments with real data we demonstrate that our approach estimates the global scale robustly and is furthermore able to improve incorrectly guessed intrinsic and extrinsic calibration parameters.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a new semi-automatic calibration approach for projector-camera systems that — unlike existing auto-calibration approaches — additionally recovers the necessary global scale by projecting on an arbitrary object of known geometry from one view. Our method therefore combines surface registration with bundle adjustment optimization on points reconstructed from structured light projections. 
In simulations on virtual data and experiments with real data we demonstrate that our approach estimates the global scale robustly and is furthermore able to improve incorrectly guessed intrinsic and extrinsic calibration parameters.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a new semi-automatic calibration approach for projector-camera systems that — unlike existing auto-calibration approaches — additionally recovers the necessary global scale by projecting on an arbitrary object of known geometry from one view. Our method therefore combines surface registration with bundle adjustment optimization on points reconstructed from structured light projections. In simulations on virtual data and experiments with real data we demonstrate that our approach estimates the global scale robustly and is furthermore able to improve incorrectly guessed intrinsic and extrinsic calibration parameters.", "fno": "07223400", "keywords": [ "Calibration", "Image Reconstruction", "Cameras", "Three Dimensional Displays", "Surface Reconstruction", "Electronic Mail", "Conferences" ], "authors": [ { "affiliation": "EXTEND3D GmbH", "fullName": "Christoph Resch", "givenName": "Christoph", "surname": "Resch", "__typename": "ArticleAuthorType" }, { "affiliation": "EXTEND3D GmbH", "fullName": "Peter Keitler", "givenName": "Peter", "surname": "Keitler", "__typename": "ArticleAuthorType" }, { "affiliation": "Volkswagen AG", "fullName": "Christoffer Menk", "givenName": "Christoffer", "surname": "Menk", "__typename": "ArticleAuthorType" }, { "affiliation": "TU München", "fullName": "Gudrun Klinker", "givenName": "Gudrun", "surname": "Klinker", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "271-272", "year": "2015", "issn": null, "isbn": "978-1-4799-1727-3", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07223399", "articleId": "12OmNzaQoq2", "__typename": "AdjacentArticleType" }, "next": { "fno": "07223401", "articleId": "12OmNrkBwoD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2017/4822/0/07926707", "title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccms/2010/3941/1/3941a489", "title": "A Calibration Algorithm for Nonplanar Array with Arbitrary Geometry Using Known Sources", "doi": null, "abstractUrl": "/proceedings-article/iccms/2010/3941a489/12OmNBLdKEG", "parentPublication": { "id": "proceedings/iccms/2010/3941/3", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981781", "title": "Simultaneous self-calibration of a projector and a camera using structured light", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2017/2943/0/2943a042", "title": "Robust Geometric Self-Calibration of Generic Multi-Projector Camera Systems", "doi": null, "abstractUrl": "/proceedings-article/ismar/2017/2943a042/12OmNCbCrRh", "parentPublication": { "id": "proceedings/ismar/2017/2943/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204317", "title": "Geometric video projector auto-calibration", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2016/3284/0/08010586", "title": "Auto-calibration of multi-projector systems on arbitrary shapes", "doi": null, "abstractUrl": "/proceedings-article/aipr/2016/08010586/12OmNrJiCPx", "parentPublication": { "id": "proceedings/aipr/2016/3284/0", "title": "2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/11/07164353", "title": "On-Site Semi-Automatic Calibration and Registration of a Projector-Camera System Using Arbitrary Objects with Known Geometry", "doi": null, "abstractUrl": "/journal/tg/2015/11/07164353/13rRUEgs2M6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466021", "title": "Auto-Calibration for Dynamic Multi-Projection Mapping on Arbitrary Surfaces", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466021/14M3DYlzziw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/04/08889677", "title": "Automated Geometric Registration for Multi-Projector Displays on Arbitrary 3D Shapes Using Uncalibrated Devices", "doi": null, "abstractUrl": "/journal/tg/2021/04/08889677/1eBugxXEgLe", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a261", "title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzcPA9q", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNCbCrRh", "doi": "10.1109/ISMAR.2017.21", "title": "Robust Geometric Self-Calibration of Generic Multi-Projector Camera Systems", "normalizedTitle": "Robust Geometric Self-Calibration of Generic Multi-Projector Camera Systems", "abstract": "Calibration of multi-projector-camera systems (MPCS) is a cumbersome and time-consuming process. It is of great importance to have robust, fast and accurate calibration procedures at hand for a wide variety of practical applications. We propose a fully automated self-calibration method for arbitrarily complex MPCS. It enables reliable and accurate intrinsic and extrinsic calibration without any human parameter tuning. We evaluated the proposed methods using more than ten multi-projection datasets ranging from a toy castle set up consisting of three cameras and one projector up to a half dome display system with more than 30 devices. Comparisons to reference calibrations, which were generated using the standard checkerboard calibration approach [44], show the reliability of our proposed pipeline, while a ground truth evaluation also shows that the resulting reconstructed point cloud accurately matches the shape of the reference geometry. Besides being fully automatic without the necessity of parameter fine tuning, the proposed method also significantly reduces the installation time of MPCS compared to checkerboard-based methods and makes it more suitable for real-world applications.", "abstracts": [ { "abstractType": "Regular", "content": "Calibration of multi-projector-camera systems (MPCS) is a cumbersome and time-consuming process. 
It is of great importance to have robust, fast and accurate calibration procedures at hand for a wide variety of practical applications. We propose a fully automated self-calibration method for arbitrarily complex MPCS. It enables reliable and accurate intrinsic and extrinsic calibration without any human parameter tuning. We evaluated the proposed methods using more than ten multi-projection datasets ranging from a toy castle set up consisting of three cameras and one projector up to a half dome display system with more than 30 devices. Comparisons to reference calibrations, which were generated using the standard checkerboard calibration approach [44], show the reliability of our proposed pipeline, while a ground truth evaluation also shows that the resulting reconstructed point cloud accurately matches the shape of the reference geometry. Besides being fully automatic without the necessity of parameter fine tuning, the proposed method also significantly reduces the installation time of MPCS compared to checkerboard-based methods and makes it more suitable for real-world applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Calibration of multi-projector-camera systems (MPCS) is a cumbersome and time-consuming process. It is of great importance to have robust, fast and accurate calibration procedures at hand for a wide variety of practical applications. We propose a fully automated self-calibration method for arbitrarily complex MPCS. It enables reliable and accurate intrinsic and extrinsic calibration without any human parameter tuning. We evaluated the proposed methods using more than ten multi-projection datasets ranging from a toy castle set up consisting of three cameras and one projector up to a half dome display system with more than 30 devices. 
Comparisons to reference calibrations, which were generated using the standard checkerboard calibration approach [44], show the reliability of our proposed pipeline, while a ground truth evaluation also shows that the resulting reconstructed point cloud accurately matches the shape of the reference geometry. Besides being fully automatic without the necessity of parameter fine tuning, the proposed method also significantly reduces the installation time of MPCS compared to checkerboard-based methods and makes it more suitable for real-world applications.", "fno": "2943a042", "keywords": [ "Calibration", "Cameras", "Image Reconstruction", "Image Sensors", "Optical Projectors", "Robust Geometric Self Calibration", "Generic Multiprojector Camera Systems", "Cumbersome Time Consuming Process", "Robust Calibration Procedures", "Fast Calibration Procedures", "Accurate Calibration Procedures", "Fully Automated Self Calibration Method", "Arbitrarily Complex MPCS", "Intrinsic Calibration", "Extrinsic Calibration", "Human Parameter Tuning", "Multiprojection Datasets", "Half Dome Display System", "Reference Calibrations", "Ground Truth Evaluation", "Parameter Fine Tuning", "Standard Checkerboard Calibration Approach", "Cameras", "Calibration", "Robustness", "Surface Treatment", "Three Dimensional Displays", "Geometry", "Projector Camera Systems", "Calibration And Registration Of Sensing Systems", "Display Hardware", "Including 3 D", "Stereoscopic And Multi User Entertainment", "Broadcast" ], "authors": [ { "affiliation": null, "fullName": "Simon Willi", "givenName": "Simon", "surname": "Willi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anselm Grundhöfer", "givenName": "Anselm", "surname": "Grundhöfer", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "42-51", "year": "2017", 
"issn": null, "isbn": "978-1-5386-2943-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2943a032", "articleId": "12OmNwAbqw0", "__typename": "AdjacentArticleType" }, "next": { "fno": "2943a052", "articleId": "12OmNvoWV1k", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2016/3641/0/3641a063", "title": "Practical and Precise Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a063/12OmNB7cjhR", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761601", "title": "Calibration of projector-camera systems from virtual mutual projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204317", "title": "Geometric video projector auto-calibration", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2013/5053/0/06475056", "title": "Geometric calibration for a multi-camera-projector system", "doi": null, "abstractUrl": "/proceedings-article/wacv/2013/06475056/12OmNvBrgGd", "parentPublication": { "id": "proceedings/wacv/2013/5053/0", "title": "Applications of Computer Vision, 
IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d596", "title": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d596/12OmNwpGgNQ", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460078", "title": "Calibration-free projector-camera system for spatial augmented reality on planar surfaces", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460078/12OmNzUxO4G", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699178", "title": "A Single-Shot-Per-Pose Camera-Projector 
Calibration System for Imperfect Planar Targets", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a261", "title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrkjVbL", "title": "Computer Graphics and Applications, Pacific Conference on", "acronym": "pg", "groupId": "1000130", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNqIzgUn", "doi": "10.1109/PG.2007.47", "title": "Radiometric Compensation through Inverse Light Transport", "normalizedTitle": "Radiometric Compensation through Inverse Light Transport", "abstract": "Radiometric compensation techniques allow seamless projections onto complex everyday surfaces. Implemented with projector-camera systems they support the presentation of visual content in situations where projection-optimized screens are not available or not desired - as in museums, historic sites, air-plane cabins, or stage performances. We propose a novel approach that employs the full light transport between projectors and a camera to account for many illumination aspects, such as interreflections, refractions, shadows, and defocus. Precomputing the inverse light transport in combination with an efficient implementation on the GPU makes the real-time compensation of captured local and global light modulations possible.", "abstracts": [ { "abstractType": "Regular", "content": "Radiometric compensation techniques allow seamless projections onto complex everyday surfaces. Implemented with projector-camera systems they support the presentation of visual content in situations where projection-optimized screens are not available or not desired - as in museums, historic sites, air-plane cabins, or stage performances. We propose a novel approach that employs the full light transport between projectors and a camera to account for many illumination aspects, such as interreflections, refractions, shadows, and defocus. 
Precomputing the inverse light transport in combination with an efficient implementation on the GPU makes the real-time compensation of captured local and global light modulations possible.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Radiometric compensation techniques allow seamless projections onto complex everyday surfaces. Implemented with projector-camera systems they support the presentation of visual content in situations where projection-optimized screens are not available or not desired - as in museums, historic sites, air-plane cabins, or stage performances. We propose a novel approach that employs the full light transport between projectors and a camera to account for many illumination aspects, such as interreflections, refractions, shadows, and defocus. Precomputing the inverse light transport in combination with an efficient implementation on the GPU makes the real-time compensation of captured local and global light modulations possible.", "fno": "30090391", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Gordon Wetzstein", "givenName": "Gordon", "surname": "Wetzstein", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Oliver Bimber", "givenName": "Oliver", "surname": "Bimber", "__typename": "ArticleAuthorType" } ], "idPrefix": "pg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-11-01T00:00:00", "pubType": "proceedings", "pages": "391-399", "year": "2007", "issn": "1550-4085", "isbn": "0-7695-3009-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "30090373", "articleId": "12OmNqGA589", "__typename": "AdjacentArticleType" }, "next": { "fno": "30090403", "articleId": "12OmNybfr3s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dim/2007/2939/0/29390337", "title": "Light 
Transport Analysis for 3D Photography", "doi": null, "abstractUrl": "/proceedings-article/3dim/2007/29390337/12OmNqOffAc", "parentPublication": { "id": "proceedings/3dim/2007/2939/0", "title": "2007 6th International Conference on 3-D Digital Imaging and Modeling", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209e382", "title": "Light Transport Refocusing for Unknown Scattering Medium", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209e382/12OmNqzu6Nb", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543460", "title": "Precomputed ROMP for light transport acquisition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543460/12OmNx8fi9Q", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459418", "title": "Radiometric compensation using stratified inverses", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459418/12OmNxFJXtd", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05540216", "title": "Analysis of light transport in scattering media", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05540216/12OmNxWLTlm", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2018/2526/0/08368461", "title": "Acquiring and characterizing plane-to-ray indirect light transport", "doi": null, "abstractUrl": "/proceedings-article/iccp/2018/08368461/12OmNzkMlWO", "parentPublication": { "id": "proceedings/iccp/2018/2526/0", "title": "2018 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/01/ttg2008010097", "title": "Real-Time Adaptive Radiometric Compensation", "doi": null, "abstractUrl": "/journal/tg/2008/01/ttg2008010097/13rRUwhpBE2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/10/ttp2011102122", "title": "On the Duality of Forward and Inverse Light Transport", "doi": null, "abstractUrl": "/journal/tp/2011/10/ttp2011102122/13rRUxAASXp", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/03/mcg2003030028", "title": "Efficient Light Transport Using Precomputed Visibility", "doi": null, "abstractUrl": "/magazine/cg/2003/03/mcg2003030028/13rRUzpzeHH", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/04/08877764", "title": "Programmable Non-Epipolar Indirect Light Transport: Capture and Analysis", "doi": null, "abstractUrl": "/journal/tg/2021/04/08877764/1emy95qb1NS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy2agRS", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "acronym": "cad-graphics", "groupId": "1001488", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNyeECsK", "doi": "10.1109/CADGraphics.2013.85", "title": "Real-Time Appearance Modification of Textured Object Using Superimposed Projection", "normalizedTitle": "Real-Time Appearance Modification of Textured Object Using Superimposed Projection", "abstract": "As commercial projectors become cheaper and smaller, appearance modification technique which employs projection display technologies now impact more areas including culture heritage, product design and architectural relighting. In this paper, we introduced a new method that use superimposed projection to modify the appearance of a non-planar textured object in real-time. Both the geometric and the radiometric models of the system are recovered during the calibration. After the recovery of system parameters, desired images are pre-warped in real-time. The target textured object presents new appearances dynamically when lighted with the custom designed illumination. Experimental results show that our system performs well in both rendering speed and keeping the contrast.", "abstracts": [ { "abstractType": "Regular", "content": "As commercial projectors become cheaper and smaller, appearance modification technique which employs projection display technologies now impact more areas including culture heritage, product design and architectural relighting. In this paper, we introduced a new method that use superimposed projection to modify the appearance of a non-planar textured object in real-time. Both the geometric and the radiometric models of the system are recovered during the calibration. After the recovery of system parameters, desired images are pre-warped in real-time. 
The target textured object presents new appearances dynamically when lighted with the custom designed illumination. Experimental results show that our system performs well in both rendering speed and keeping the contrast.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As commercial projectors become cheaper and smaller, appearance modification technique which employs projection display technologies now impact more areas including culture heritage, product design and architectural relighting. In this paper, we introduced a new method that use superimposed projection to modify the appearance of a non-planar textured object in real-time. Both the geometric and the radiometric models of the system are recovered during the calibration. After the recovery of system parameters, desired images are pre-warped in real-time. The target textured object presents new appearances dynamically when lighted with the custom designed illumination. Experimental results show that our system performs well in both rendering speed and keeping the contrast.", "fno": "06815049", "keywords": [ "Radiometry", "Image Color Analysis", "Real Time Systems", "Cameras", "Transmission Line Matrix Methods", "Computer Graphics", "Calibration", "Superimposed Projection", "Structure Light", "Color Mixing", "Appearance Modification" ], "authors": [ { "affiliation": null, "fullName": "Feng Chen", "givenName": "Feng", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yue Liu", "givenName": "Yue", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cad-graphics", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-11-01T00:00:00", "pubType": "proceedings", "pages": "437-438", "year": "2013", "issn": null, "isbn": "978-1-4799-2576-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06815048", 
"articleId": "12OmNBKmXsf", "__typename": "AdjacentArticleType" }, "next": { "fno": "06815050", "articleId": "12OmNyXMQcU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/asia/2009/3910/0/3910a202", "title": "Appearance-Based Subspace Projection Techniques for Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/asia/2009/3910a202/12OmNANTAyx", "parentPublication": { "id": "proceedings/asia/2009/3910/0", "title": "2009 International Asia Symposium on Intelligent Interaction and Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2015/7079/0/07169847", "title": "Preserving image color appearance on non-white projection surfaces", "doi": null, "abstractUrl": "/proceedings-article/icmew/2015/07169847/12OmNAYGlu6", "parentPublication": { "id": "proceedings/icmew/2015/7079/0", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761601", "title": "Calibration of projector-camera systems from virtual mutual projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2006/2825/0/04155766", "title": "3D from Line Segments in Two Poorly-Textured, Uncalibrated Images", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/04155766/12OmNvzJG7Z", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460060", "title": "Appearance control for human material perception manipulation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460060/12OmNwE9OV7", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2013/4990/0/4990a918", "title": "Projection Based Real-Time Material Appearance Manipulation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2013/4990a918/12OmNz6iOsX", "parentPublication": { "id": "proceedings/cvprw/2013/4990/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2013/4983/0/4983a210", "title": "Exploiting Color Constancy for Compensating Projected Images on Non-white Light Projection Screen", "doi": null, "abstractUrl": "/proceedings-article/crv/2013/4983a210/12OmNzYwc4C", "parentPublication": { "id": "proceedings/crv/2013/4983/0", "title": "2013 International Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/1/01315067", "title": "Making one object look like another: controlling appearance using a projector-camera system", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315067/12OmNzcPAjA", "parentPublication": { "id": "proceedings/cvpr/2004/2158/1", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a750", "title": "Perceptually-Based Optimization for Radiometric Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a750/1CJd3VypH7G", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08827959", "title": "Robust Reflectance Estimation for Projection-Based Appearance Control in a Dynamic Light Environment", "doi": null, "abstractUrl": "/journal/tg/2021/03/08827959/1ddblRDPV8Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzSh1bf", "title": "Proceedings VIS 2001. Visualization 2001", "acronym": "visual", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNzDehdU", "doi": "10.1109/VISUAL.2001.964509", "title": "Dynamic shadow removal from front projection displays", "normalizedTitle": "Dynamic shadow removal from front projection displays", "abstract": "Front-projection display environments suffer from a fundamental problem: users and other objects in the environment can easily and inadvertently block projectors, creating shadows on the displayed image. We introduce a technique that detects and corrects transient shadows in a multi-projector display. Our approach is to minimize the difference between predicted (generated) and observed (camera) images by continuous modification of the projected image values for each display device. We speculate that the general predictive monitoring framework introduced here is capable of addressing more general radiometric consistency problems. Using an automatically-derived relative position of cameras and projectors in the display environment and a straightforward color correction scheme, the system renders an expected image for each camera location. Cameras observe the displayed image, which is compared with the expected image to detect shadowed regions. These regions are transformed to the appropriate projector frames, where corresponding pixel values are increased. In display regions where more than one projector contributes to the image, shadow regions are eliminated. We demonstrate an implementation of the technique in a multiprojector system.", "abstracts": [ { "abstractType": "Regular", "content": "Front-projection display environments suffer from a fundamental problem: users and other objects in the environment can easily and inadvertently block projectors, creating shadows on the displayed image. 
We introduce a technique that detects and corrects transient shadows in a multi-projector display. Our approach is to minimize the difference between predicted (generated) and observed (camera) images by continuous modification of the projected image values for each display device. We speculate that the general predictive monitoring framework introduced here is capable of addressing more general radiometric consistency problems. Using an automatically-derived relative position of cameras and projectors in the display environment and a straightforward color correction scheme, the system renders an expected image for each camera location. Cameras observe the displayed image, which is compared with the expected image to detect shadowed regions. These regions are transformed to the appropriate projector frames, where corresponding pixel values are increased. In display regions where more than one projector contributes to the image, shadow regions are eliminated. We demonstrate an implementation of the technique in a multiprojector system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Front-projection display environments suffer from a fundamental problem: users and other objects in the environment can easily and inadvertently block projectors, creating shadows on the displayed image. We introduce a technique that detects and corrects transient shadows in a multi-projector display. Our approach is to minimize the difference between predicted (generated) and observed (camera) images by continuous modification of the projected image values for each display device. We speculate that the general predictive monitoring framework introduced here is capable of addressing more general radiometric consistency problems. Using an automatically-derived relative position of cameras and projectors in the display environment and a straightforward color correction scheme, the system renders an expected image for each camera location. 
Cameras observe the displayed image, which is compared with the expected image to detect shadowed regions. These regions are transformed to the appropriate projector frames, where corresponding pixel values are increased. In display regions where more than one projector contributes to the image, shadow regions are eliminated. We demonstrate an implementation of the technique in a multiprojector system.", "fno": "00964509", "keywords": [ "Optical Projectors", "Realistic Images", "Image Processing", "Dynamic Shadow Removal", "Front Projection Displays", "Minimization", "Predictive Monitoring Framework", "Projector Frames", "Multiprojector System", "Cameras", "Radiometry", "Calibration", "Computer Displays", "Color", "Rendering Computer Graphics", "Computer Science", "Monitoring", "Reflection", "Temperature" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Kentucky Univ., Lexington, KY, USA", "fullName": "C. Jaynes", "givenName": "C.", "surname": "Jaynes", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Kentucky Univ., Lexington, KY, USA", "fullName": "S. Webb", "givenName": "S.", "surname": "Webb", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Kentucky Univ., Lexington, KY, USA", "fullName": "R.M. Steele", "givenName": "R.M.", "surname": "Steele", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Kentucky Univ., Lexington, KY, USA", "fullName": "M. Brown", "givenName": "M.", "surname": "Brown", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Kentucky Univ., Lexington, KY, USA", "fullName": "W.B. 
Seales", "givenName": "W.B.", "surname": "Seales", "__typename": "ArticleAuthorType" } ], "idPrefix": "visual", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-01-01T00:00:00", "pubType": "proceedings", "pages": "175-555", "year": "2001", "issn": null, "isbn": "0-7803-7201-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00964503", "articleId": "12OmNzwZ6wa", "__typename": "AdjacentArticleType" }, "next": { "fno": "00964520", "articleId": "12OmNAR1aX5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/1999/5897/0/58970026", "title": "Multi-Projector Displays Using Camera-Based Registration", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970026/12OmNAfy7KW", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270468", "title": "Shadow Removal in Front Projection Environments Using Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270468/12OmNqNXEqS", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2003/1900/2/190020513", "title": "Shadow Elimination and Occluder Light Suppression for Multi-Projector Displays", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2003/190020513/12OmNvFpEvj", "parentPublication": { "id": "proceedings/cvpr/2003/1900/2", "title": "2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. 
Proceedings.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200jaynes", "title": "Dynamic Shadow Removal from Front Projection Displays", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200jaynes/12OmNxG1yQD", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2001/1272/2/127220151", "title": "Dynamic Shadow Elimination for Multi-Projector Displays", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2001/127220151/12OmNz5JCeX", "parentPublication": { "id": "proceedings/cvpr/2001/1272/2", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1101", "title": "Asynchronous Distributed Calibration for Scalable and Reconfigurable Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1101/13rRUwInvJ9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1368", "title": "Registration Techniques for Using Imperfect and Par tially Calibrated Devices in Planar Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/02/v0177", "title": "Color Nonuniformity in Projection-Based Displays: Analysis and Solutions", "doi": null, "abstractUrl": "/journal/tg/2004/02/v0177/13rRUwfI0PW", "parentPublication": { "id": "trans/tg", 
"title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061317", "title": "Color Seamlessness in Multi-Projector Displays Using Constrained Gamut Morphing", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061317/13rRUwgQpqH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/03/v0290", "title": "Camera-Based Detection and Removal of Shadows from Interactive Multiprojector Displays", "doi": null, "abstractUrl": "/journal/tg/2004/03/v0290/13rRUxZRbnR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwwMf3H", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "acronym": "ismarw", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNzEVS0M", "doi": "10.1109/ISMAR-Adjunct.2016.0062", "title": "Distributed Optimization for Shadow Removal in Spatial Augmented Reality", "normalizedTitle": "Distributed Optimization for Shadow Removal in Spatial Augmented Reality", "abstract": "This paper proposes a novel shadow removal technique for cooperative projection system based-on spatio-temporal prediction. In our previous work, we proposed a distributed feedback algorithm, which is implementable in cooperative projection environments subject to data transfer constraints between components. A weakness of this scheme is that the compensation is conducted in each pixel independently. As a result, spatio-temporal information of the environmental change cannot be utilized even if it is available. In view of this, we specifically investigate the situation where some of projectors are occluded by a moving object whose oneframe-ahead behavior is predictable. In order to remove the resulting shadow, we propose a novel error propagating scheme that is still implementable in a distributed manner, and enables us to incorporate the prediction information of the obstacle. It is demonstrated experimentally that the proposed method significantly improves the shadow removal performance comparison to the previous work.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a novel shadow removal technique for cooperative projection system based-on spatio-temporal prediction. In our previous work, we proposed a distributed feedback algorithm, which is implementable in cooperative projection environments subject to data transfer constraints between components. 
A weakness of this scheme is that the compensation is conducted in each pixel independently. As a result, spatio-temporal information of the environmental change cannot be utilized even if it is available. In view of this, we specifically investigate the situation where some of projectors are occluded by a moving object whose oneframe-ahead behavior is predictable. In order to remove the resulting shadow, we propose a novel error propagating scheme that is still implementable in a distributed manner, and enables us to incorporate the prediction information of the obstacle. It is demonstrated experimentally that the proposed method significantly improves the shadow removal performance comparison to the previous work.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a novel shadow removal technique for cooperative projection system based-on spatio-temporal prediction. In our previous work, we proposed a distributed feedback algorithm, which is implementable in cooperative projection environments subject to data transfer constraints between components. A weakness of this scheme is that the compensation is conducted in each pixel independently. As a result, spatio-temporal information of the environmental change cannot be utilized even if it is available. In view of this, we specifically investigate the situation where some of projectors are occluded by a moving object whose oneframe-ahead behavior is predictable. In order to remove the resulting shadow, we propose a novel error propagating scheme that is still implementable in a distributed manner, and enables us to incorporate the prediction information of the obstacle. 
It is demonstrated experimentally that the proposed method significantly improves the shadow removal performance comparison to the previous work.", "fno": "07836483", "keywords": [ "Augmented Reality", "Image Motion Analysis", "Optimisation", "Spatiotemporal Phenomena", "Shadow Removal Performance Improvement", "Obstacle Prediction Information", "Error Propagating Scheme", "One Frame Ahead Behavior", "Environmental Change", "Spatiotemporal Information", "Spatiotemporal Prediction", "Cooperative Projection System", "Spatial Augmented Reality", "Distributed Optimization", "Robustness", "Radiometry", "Cameras", "Augmented Reality", "Optimization", "Electronic Mail", "Heuristic Algorithms", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces Mixed Augmented Reality" ], "authors": [ { "affiliation": null, "fullName": "Jun Tsukamoto", "givenName": "Jun", "surname": "Tsukamoto", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Daisuke Iwai", "givenName": "Daisuke", "surname": "Iwai", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kenji Kashima", "givenName": "Kenji", "surname": "Kashima", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismarw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "147-148", "year": "2016", "issn": null, "isbn": "978-1-5090-3740-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07836482", "articleId": "12OmNqzcvPm", "__typename": "AdjacentArticleType" }, "next": { "fno": "07836484", "articleId": "12OmNCdTeMH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2007/1179/0/04270468", "title": "Shadow Removal in Front Projection Environments Using Object Tracking", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2007/04270468/12OmNqNXEqS", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2007/1834/0/04458149", "title": "Object's Shadow Removal with Removal Validation", "doi": null, "abstractUrl": "/proceedings-article/isspit/2007/04458149/12OmNvrvjf2", "parentPublication": { "id": "proceedings/isspit/2007/1834/0", "title": "2007 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cimca/2006/2731/0/27310037", "title": "Shadow Removal Based on Shadow Direction and Shadow Attributes", "doi": null, "abstractUrl": "/proceedings-article/cimca/2006/27310037/12OmNwcUk5p", "parentPublication": { "id": "proceedings/cimca/2006/2731/0", "title": "2006 International Conference on Computational Inteligence for Modelling Control and Automation and International Conference on Intelligent Agents Web Technologies and International Commerce (CIMCA'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2004/2191/0/01383090", "title": "Spatial Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2004/01383090/12OmNzmclzM", "parentPublication": { "id": "proceedings/ismar/2004/2191/0", "title": "Third IEEE and ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f617", "title": "Bijective Mapping Network for Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f617/1H1jODjaaEE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b705", "title": "Fine-Context Shadow Detection using Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b705/1L8qiohbimI", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/11/08723605", "title": "Direction-Aware Spatial Context Features for Shadow Detection and Removal", "doi": null, "abstractUrl": "/journal/tp/2020/11/08723605/1aqKRiohjA4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i577", "title": "Shadow Removal via Shadow Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i577/1hVlckpFsu4", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900e925", "title": "From Shadow Generation to Shadow Removal", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900e925/1yeKbk8l5Ze", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09601181", "title": "Physics-Based Shadow Image Decomposition for Shadow Removal", "doi": null, "abstractUrl": "/journal/tp/2022/12/09601181/1yfWxXlOrVC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & 
Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJd3VypH7G", "doi": "10.1109/VRW55335.2022.00226", "title": "Perceptually-Based Optimization for Radiometric Projector Compensation", "normalizedTitle": "Perceptually-Based Optimization for Radiometric Projector Compensation", "abstract": "Radiometric compensation techniques have been proposed to manipulate the appearance of arbitrarily textured surfaces using projectors. However, due to the limited dynamic range of the projectors, these compensation techniques often fail under bright environmental lighting or when the projection surface contains high contrast textures, resulting in clipping artifacts. To address this issue, we propose to apply a perceptually-based tone mapping technique to generate compensated projection images. The experimental results demonstrated that our approach minimizes the clipping artifacts and contrast degradation under challenging conditions.", "abstracts": [ { "abstractType": "Regular", "content": "Radiometric compensation techniques have been proposed to manipulate the appearance of arbitrarily textured surfaces using projectors. However, due to the limited dynamic range of the projectors, these compensation techniques often fail under bright environmental lighting or when the projection surface contains high contrast textures, resulting in clipping artifacts. To address this issue, we propose to apply a perceptually-based tone mapping technique to generate compensated projection images. 
The experimental results demonstrated that our approach minimizes the clipping artifacts and contrast degradation under challenging conditions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Radiometric compensation techniques have been proposed to manipulate the appearance of arbitrarily textured surfaces using projectors. However, due to the limited dynamic range of the projectors, these compensation techniques often fail under bright environmental lighting or when the projection surface contains high contrast textures, resulting in clipping artifacts. To address this issue, we propose to apply a perceptually-based tone mapping technique to generate compensated projection images. The experimental results demonstrated that our approach minimizes the clipping artifacts and contrast degradation under challenging conditions.", "fno": "840200a750", "keywords": [ "Brightness", "Image Texture", "Optical Projectors", "High Contrast Textures", "Clipping Artifacts", "Tone Mapping Technique", "Compensated Projection Images", "Radiometric Projector Compensation", "Bright Environmental Lighting", "Perceptually Based Optimization", "Degradation", "Three Dimensional Displays", "Conferences", "Lighting", "User Interfaces", "Dynamic Range", "Radiometry", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces X 2014 Perception", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces X 2014 Mixed Augmented Reality" ], "authors": [ { "affiliation": "NTT Communication Science Laboratories", "fullName": "Ryo Akiyama", "givenName": "Ryo", "surname": "Akiyama", "__typename": "ArticleAuthorType" }, { "affiliation": "NTT Communication Science Laboratories", "fullName": "Taiki Fukiage", "givenName": "Taiki", "surname": "Fukiage", "__typename": "ArticleAuthorType" }, { "affiliation": "Kyoto University,NTT Communication Science Laboratories", "fullName": "Shin'ya Nishida", "givenName": "Shin'ya", "surname": "Nishida", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "750-751", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1CJd3OgNTb2", "name": "pvrw202284020-09757626s1-mm_840200a750.zip", "size": "58.7 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757626s1-mm_840200a750.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "840200a748", "articleId": "1CJenlXsOSQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a752", "articleId": "1CJdI2bmjuM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2013/4990/0/4990a924", "title": "Practical Non-linear Photometric Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2013/4990a924/12OmNBQkwYN", "parentPublication": { "id": "proceedings/cvprw/2013/4990/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2015/7082/0/07177434", "title": "Painted face effect removal by a projector-camera system with dynamic ambient light adaptability", "doi": null, "abstractUrl": "/proceedings-article/icme/2015/07177434/12OmNqG0T4h", "parentPublication": { "id": "proceedings/icme/2015/7082/0", "title": "2015 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2007/3009/0/30090391", "title": "Radiometric Compensation through Inverse Light Transport", "doi": null, "abstractUrl": "/proceedings-article/pg/2007/30090391/12OmNqIzgUn", "parentPublication": { "id": 
"proceedings/pg/2007/3009/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459418", "title": "Radiometric compensation using stratified inverses", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459418/12OmNxFJXtd", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2005/2660/0/237230100", "title": "Radiometric Compensation in a Projector-Camera System Based Properties of Human Vision System", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2005/237230100/12OmNzVoBRi", "parentPublication": { "id": "proceedings/cvprw/2005/2660/0", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2006/2646/0/26460006", "title": "Robust Content-Dependent Photometric Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2006/26460006/12OmNzYNN6k", "parentPublication": { "id": "proceedings/cvprw/2006/2646/0", "title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/11/07164338", "title": "Radiometric Compensation for Cooperative Distributed Multi-Projection System Through 2-DOF Distributed Control", "doi": null, "abstractUrl": "/journal/tg/2015/11/07164338/13rRUIIVlkk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/01/ttg2008010097", "title": 
"Real-Time Adaptive Radiometric Compensation", "doi": null, "abstractUrl": "/journal/tg/2008/01/ttg2008010097/13rRUwhpBE2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a449", "title": "Extended Depth-of-Field Projector using Learned Diffractive Optics", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a449/1MNgNe272U0", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b899", "title": "FRESCO: Fast Radiometric Egocentric Screen Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b899/1iTvvExuDBe", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H2petWxAqI", "title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)", "acronym": "cost", "groupId": "1847867", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H2pilDW8ww", "doi": "10.1109/CoST57098.2022.00045", "title": "Review of Photometric Compensation in Projection System", "normalizedTitle": "Review of Photometric Compensation in Projection System", "abstract": "With the continuous development and advancement of computer vision related technologies and the improvement in projector performance in recent years, people are using projectors more and more widely in various scenarios of life and replacing traditional screens for various natural surfaces. However, when the projection surface is no longer the ideal white projection curtain, the projection effect is often greatly compromised, especially when there is color texture on the projection surface. In order to achieve a good perception of the projected image of some complex scenes, a series of compensation processes are needed for the projected image. This paper reviews the projection compensation techniques based on projector-camera systems that have emerged in recent years such as color mixing matrix, thin plate spline, and deep learning. An experimental comparison of the various compensation techniques is also presented.", "abstracts": [ { "abstractType": "Regular", "content": "With the continuous development and advancement of computer vision related technologies and the improvement in projector performance in recent years, people are using projectors more and more widely in various scenarios of life and replacing traditional screens for various natural surfaces. However, when the projection surface is no longer the ideal white projection curtain, the projection effect is often greatly compromised, especially when there is color texture on the projection surface. 
In order to achieve a good perception of the projected image of some complex scenes, a series of compensation processes are needed for the projected image. This paper reviews the projection compensation techniques based on projector-camera systems that have emerged in recent years such as color mixing matrix, thin plate spline, and deep learning. An experimental comparison of the various compensation techniques is also presented.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the continuous development and advancement of computer vision related technologies and the improvement in projector performance in recent years, people are using projectors more and more widely in various scenarios of life and replacing traditional screens for various natural surfaces. However, when the projection surface is no longer the ideal white projection curtain, the projection effect is often greatly compromised, especially when there is color texture on the projection surface. In order to achieve a good perception of the projected image of some complex scenes, a series of compensation processes are needed for the projected image. This paper reviews the projection compensation techniques based on projector-camera systems that have emerged in recent years such as color mixing matrix, thin plate spline, and deep learning. 
An experimental comparison of the various compensation techniques is also presented.", "fno": "624800a180", "keywords": [ "Cameras", "Computer Vision", "Image Colour Analysis", "Optical Projectors", "Splines Mathematics", "Replacing Traditional Screens", "Natural Surfaces", "Projection Surface", "Ideal White Projection Curtain", "Projection Effect", "Projected Image", "Compensation Processes", "Projection Compensation Techniques", "Projector Camera Systems", "Photometric Compensation", "Projection System", "Continuous Development", "Computer Vision Related Technologies", "Projector Performance", "Projectors", "Deep Learning", "Image Color Analysis", "Neural Networks", "Cameras", "Real Time Systems", "Surface Texture", "Splines Mathematics", "Photometric Compensation", "Geometric Correction", "Color Mixing Matrix", "Thin Plate Spline TPS", "Deep Learning" ], "authors": [ { "affiliation": "Communication University of China,Beijing,China", "fullName": "Zongxuan Shi", "givenName": "Zongxuan", "surname": "Shi", "__typename": "ArticleAuthorType" }, { "affiliation": "Communication University of China,Beijing,China", "fullName": "Lipi Niu", "givenName": "Lipi", "surname": "Niu", "__typename": "ArticleAuthorType" }, { "affiliation": "Communication University of China,Beijing,China", "fullName": "Yanming Zhao", "givenName": "Yanming", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "Communication University of China,Beijing,China", "fullName": "Shaobin Li", "givenName": "Shaobin", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "cost", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "180-185", "year": "2022", "issn": null, "isbn": "978-1-6654-6248-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "624800a174", "articleId": "1H2pngCvpcc", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "624800a186", "articleId": "1H2pn5E6Upy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ism/2017/2937/0/2937a114", "title": "Detecting Good Surface for Improvisatory Visual Projection", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a114/12OmNCd2roE", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2017/3050/0/08217860", "title": "A 3D recognition and projection system for meridians and acupoints", "doi": null, "abstractUrl": "/proceedings-article/bibm/2017/08217860/12OmNqEAT6A", "parentPublication": { "id": "proceedings/bibm/2017/3050/0", "title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a206", "title": "[POSTER] Geometric Mapping for Color Compensation Using Scene Adaptive Patches", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a206/12OmNzmclRZ", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007248", "title": "Simultaneous Projection and Positioning of Laser Projector Pixels", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007248/13rRUxASupD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08497080", "title": "Seamless Multi-Projection Revisited", "doi": null, "abstractUrl": 
"/journal/tg/2018/11/08497080/14M3DZx5FZe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a135", "title": "CompenHR: Efficient Full Compensation for High-resolution Projector", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a135/1MNgmceltOU", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08821571", "title": "Animated Stickies: Fast Video Projection Mapping onto a Markerless Plane through a Direct Closed-Loop Alignment", "doi": null, "abstractUrl": "/journal/tg/2019/11/08821571/1d6xCnoQsU0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300g803", "title": "End-To-End Projector Photometric Compensation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300g803/1gyraGpeGyY", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h164", "title": "CompenNet++: End-to-End Full Projector Compensation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h164/1hQqyKnlCEM", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09318552", "title": "End-to-End Full Projector 
Compensation", "doi": null, "abstractUrl": "/journal/tp/2022/06/09318552/1qdT3YKBd5u", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy9Prj1", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNvD8RuE", "doi": "10.1109/ICCVW.2017.112", "title": "3D Scene Mesh from CNN Depth Predictions and Sparse Monocular SLAM", "normalizedTitle": "3D Scene Mesh from CNN Depth Predictions and Sparse Monocular SLAM", "abstract": "In this paper, we propose a novel framework for integrating geometrical measurements of monocular visual simultaneous localization and mapping (SLAM) and depth prediction using a convolutional neural network (CNN). In our framework, SLAM-measured sparse features and CNN-predicted dense depth maps are fused to obtain a more accurate dense 3D reconstruction including scale. We continuously update an initial 3D mesh by integrating accurately tracked sparse features points. Compared to prior work on integrating SLAM and CNN estimates [26], there are two main differences: Using a 3D mesh representation allows as-rigid-as-possible update transformations. We further propose a system architecture suitable for mobile devices, where feature tracking and CNN-based depth prediction modules are separated, and only the former is run on the device. We evaluate the framework by comparing the 3D reconstruction result with 3D measurements obtained using an RGBD sensor, showing a reduction in the mean residual error of 38% compared to CNN-based depth map prediction alone.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a novel framework for integrating geometrical measurements of monocular visual simultaneous localization and mapping (SLAM) and depth prediction using a convolutional neural network (CNN). 
In our framework, SLAM-measured sparse features and CNN-predicted dense depth maps are fused to obtain a more accurate dense 3D reconstruction including scale. We continuously update an initial 3D mesh by integrating accurately tracked sparse features points. Compared to prior work on integrating SLAM and CNN estimates [26], there are two main differences: Using a 3D mesh representation allows as-rigid-as-possible update transformations. We further propose a system architecture suitable for mobile devices, where feature tracking and CNN-based depth prediction modules are separated, and only the former is run on the device. We evaluate the framework by comparing the 3D reconstruction result with 3D measurements obtained using an RGBD sensor, showing a reduction in the mean residual error of 38% compared to CNN-based depth map prediction alone.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a novel framework for integrating geometrical measurements of monocular visual simultaneous localization and mapping (SLAM) and depth prediction using a convolutional neural network (CNN). In our framework, SLAM-measured sparse features and CNN-predicted dense depth maps are fused to obtain a more accurate dense 3D reconstruction including scale. We continuously update an initial 3D mesh by integrating accurately tracked sparse features points. Compared to prior work on integrating SLAM and CNN estimates [26], there are two main differences: Using a 3D mesh representation allows as-rigid-as-possible update transformations. We further propose a system architecture suitable for mobile devices, where feature tracking and CNN-based depth prediction modules are separated, and only the former is run on the device. 
We evaluate the framework by comparing the 3D reconstruction result with 3D measurements obtained using an RGBD sensor, showing a reduction in the mean residual error of 38% compared to CNN-based depth map prediction alone.", "fno": "1034a912", "keywords": [ "Simultaneous Localization And Mapping", "Three Dimensional Displays", "Cameras", "Strain", "Mobile Handsets", "Surface Reconstruction" ], "authors": [ { "affiliation": null, "fullName": "Tomoyuki Mukasa", "givenName": "Tomoyuki", "surname": "Mukasa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jiu Xu", "givenName": "Jiu", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Stenger Bjorn", "givenName": "Stenger", "surname": "Bjorn", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "912-919", "year": "2017", "issn": "2473-9944", "isbn": "978-1-5386-1034-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "1034a904", "articleId": "12OmNyL0ThS", "__typename": "AdjacentArticleType" }, "next": { "fno": "1034a920", "articleId": "12OmNyL0Tim", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2017/1034/0/1034c408", "title": "Edge SLAM: Edge Points Based Monocular Visual SLAM", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034c408/12OmNCb3frz", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a452", "title": "Planes Detection for Robust Localization and Mapping in RGB-D SLAM Systems", "doi": null, 
"abstractUrl": "/proceedings-article/3dv/2015/8332a452/12OmNqH9hdY", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223411", "title": "Zoom factor compensation for monocular SLAM", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223411/12OmNzJbQUU", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457g565", "title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g565/12OmNzTH0Qa", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486548", "title": "Dense Reconstruction from Monocular Slam with Fusion of Sparse Map-Points and Cnn-Inferred Depth", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486548/14jQfP7ey4y", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500a031", "title": "EGO-SLAM: A Robust Monocular SLAM for Egocentric Videos", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a031/18j8QSyEfja", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icpr/2022/9062/0/09956576", "title": "Joint Self-Supervised Monocular Depth Estimation and SLAM", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600d096", "title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600d096/1L8qEHGGTlu", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300a134", "title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a101", "title": "Improved ORB-SLAM Based 3D Dense Reconstruction for Monocular Endoscopic Image", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a101/1vg8aYQPZi8", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx2QUDD", "title": "2015 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNvm6VJS", "doi": "10.1109/3DV.2015.75", "title": "Reconstructing Street-Scenes in Real-Time from a Driving Car", "normalizedTitle": "Reconstructing Street-Scenes in Real-Time from a Driving Car", "abstract": "Most current approaches to street-scene 3D reconstruction from a driving car to date rely on 3D laser scanning or tedious offline computation from visual images. In this paper, we compare a real-time capable 3D reconstruction method using a stereo extension of large-scale direct SLAM (LSD-SLAM) with laser-based maps and traditional stereo reconstructions based on processing individual stereo frames. In our reconstructions, small-baseline comparison over several subsequent frames are fused with fixed-baseline disparity from the stereo camera setup. These results demonstrate that our direct SLAM technique provides an excellent compromise between speed and accuracy, generating visually pleasing and globally consistent semi-dense reconstructions of the environment in real-time on a single CPU.", "abstracts": [ { "abstractType": "Regular", "content": "Most current approaches to street-scene 3D reconstruction from a driving car to date rely on 3D laser scanning or tedious offline computation from visual images. In this paper, we compare a real-time capable 3D reconstruction method using a stereo extension of large-scale direct SLAM (LSD-SLAM) with laser-based maps and traditional stereo reconstructions based on processing individual stereo frames. In our reconstructions, small-baseline comparison over several subsequent frames are fused with fixed-baseline disparity from the stereo camera setup. 
These results demonstrate that our direct SLAM technique provides an excellent compromise between speed and accuracy, generating visually pleasing and globally consistent semi-dense reconstructions of the environment in real-time on a single CPU.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Most current approaches to street-scene 3D reconstruction from a driving car to date rely on 3D laser scanning or tedious offline computation from visual images. In this paper, we compare a real-time capable 3D reconstruction method using a stereo extension of large-scale direct SLAM (LSD-SLAM) with laser-based maps and traditional stereo reconstructions based on processing individual stereo frames. In our reconstructions, small-baseline comparison over several subsequent frames are fused with fixed-baseline disparity from the stereo camera setup. These results demonstrate that our direct SLAM technique provides an excellent compromise between speed and accuracy, generating visually pleasing and globally consistent semi-dense reconstructions of the environment in real-time on a single CPU.", "fno": "8332a607", "keywords": [ "Cameras", "Three Dimensional Displays", "Image Reconstruction", "Real Time Systems", "Tracking", "Simultaneous Localization And Mapping" ], "authors": [ { "affiliation": null, "fullName": "Vladyslav Usenko", "givenName": "Vladyslav", "surname": "Usenko", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jakob Engel", "givenName": "Jakob", "surname": "Engel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jorg Stuckler", "givenName": "Jorg", "surname": "Stuckler", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Daniel Cremers", "givenName": "Daniel", "surname": "Cremers", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-10-01T00:00:00", "pubType": 
"proceedings", "pages": "607-614", "year": "2015", "issn": null, "isbn": "978-1-4673-8332-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "8332a598", "articleId": "12OmNyeWdHH", "__typename": "AdjacentArticleType" }, "next": { "fno": "8332a615", "articleId": "12OmNqN6R8z", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2017/2610/0/261001a048", "title": "High Dynamic Range SLAM with Map-Aware Exposure Time Control", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a048/12OmNrMZpjV", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206769", "title": "Visual loop closing using multi-resolution SIFT grids in metric-topological SLAM", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206769/12OmNyuPKYu", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a185", "title": "Relative Camera Refinement for Accurate Dense Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a185/12OmNzmtWvT", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asap/2018/7479/0/08445128", "title": "Enhanced Visual Loop Closing for Laser-Based SLAM", "doi": null, "abstractUrl": "/proceedings-article/asap/2018/08445128/13bd1eTtWYW", "parentPublication": { "id": "proceedings/asap/2018/7479/0", "title": "2018 IEEE 29th 
International Conference on Application-specific Systems, Architectures and Processors (ASAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2019/9245/0/924500a417", "title": "Point-Cloud Mapping and Merging Using Mobile Laser Scanner", "doi": null, "abstractUrl": "/proceedings-article/irc/2019/924500a417/18M7dgLsWCk", "parentPublication": { "id": "proceedings/irc/2019/9245/0", "title": "2019 Third IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a574", "title": "Mobile Photometric Stereo with Keypoint-Based SLAM for Dense 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a574/1ezREwjZfFe", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2020/9891/0/09108673", "title": "Towards Richer 3D Reference Maps in Urban Scenes", "doi": null, "abstractUrl": "/proceedings-article/crv/2020/09108673/1kpIDVmwE5G", "parentPublication": { "id": "proceedings/crv/2020/9891/0", "title": "2020 17th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b760", "title": "SLAM in the Field: An Evaluation of Monocular Mapping and Localization on Challenging Dynamic Agricultural Environment", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b760/1uqGnyhqL6g", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a287", "title": "A novel SLAM method for 
laparoscopic scene reconstruction with feature patch tracking", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a287/1vg7UqDNZIY", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800b362", "title": "DSP-SLAM: Object Oriented SLAM with Deep Shape Priors", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800b362/1zWEhxN28YU", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1IHotVZum6Q", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "9956007", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1IHpbIpwRfW", "doi": "10.1109/ICPR56361.2022.9956576", "title": "Joint Self-Supervised Monocular Depth Estimation and SLAM", "normalizedTitle": "Joint Self-Supervised Monocular Depth Estimation and SLAM", "abstract": "Classical monocular Simultaneous Localization and Mapping (SLAM) and convolutional neural networks (CNNs) based monocular depth estimation represent two different methods towards reconstructing the 3D geometry of the scene. In this paper, we leverage SLAM and depth estimation for their respective advantages to further improve the performance of both tasks. For SLAM, running pseudo RGBD-SLAM with CNN-predicted depths improves the accuracy of visual odometry and mapping compared with the monocular SLAM baseline. For depth estimation, we use 3D scene structures from geometric SLAM to refine the pre-trained monocular depth estimation network to update the model which did not reach the optimum due to the photometric inconsistency. Moreover, the proposed method incorporates an optional Sparse Auxiliary Network [1] into the original depth estimation network, from which the sparse depth features are dynamically combined with RGB features for predicting the depth map. Experimental results on KITTI and TUM RGB-D datasets show that our method achieves state-of-the-art performances on both depth prediction and pose estimation tasks.", "abstracts": [ { "abstractType": "Regular", "content": "Classical monocular Simultaneous Localization and Mapping (SLAM) and convolutional neural networks (CNNs) based monocular depth estimation represent two different methods towards reconstructing the 3D geometry of the scene. 
In this paper, we leverage SLAM and depth estimation for their respective advantages to further improve the performance of both tasks. For SLAM, running pseudo RGBD-SLAM with CNN-predicted depths improves the accuracy of visual odometry and mapping compared with the monocular SLAM baseline. For depth estimation, we use 3D scene structures from geometric SLAM to refine the pre-trained monocular depth estimation network to update the model which did not reach the optimum due to the photometric inconsistency. Moreover, the proposed method incorporates an optional Sparse Auxiliary Network [1] into the original depth estimation network, from which the sparse depth features are dynamically combined with RGB features for predicting the depth map. Experimental results on KITTI and TUM RGB-D datasets show that our method achieves state-of-the-art performances on both depth prediction and pose estimation tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Classical monocular Simultaneous Localization and Mapping (SLAM) and convolutional neural networks (CNNs) based monocular depth estimation represent two different methods towards reconstructing the 3D geometry of the scene. In this paper, we leverage SLAM and depth estimation for their respective advantages to further improve the performance of both tasks. For SLAM, running pseudo RGBD-SLAM with CNN-predicted depths improves the accuracy of visual odometry and mapping compared with the monocular SLAM baseline. For depth estimation, we use 3D scene structures from geometric SLAM to refine the pre-trained monocular depth estimation network to update the model which did not reach the optimum due to the photometric inconsistency. Moreover, the proposed method incorporates an optional Sparse Auxiliary Network [1] into the original depth estimation network, from which the sparse depth features are dynamically combined with RGB features for predicting the depth map. 
Experimental results on KITTI and TUM RGB-D datasets show that our method achieves state-of-the-art performances on both depth prediction and pose estimation tasks.", "fno": "09956576", "keywords": [ "Distance Measurement", "Geometry", "Image Colour Analysis", "Image Motion Analysis", "Image Reconstruction", "Learning Artificial Intelligence", "Mobile Robots", "Neural Nets", "Pose Estimation", "Robot Vision", "SLAM Robots", "Classical Monocular Simultaneous Localization", "CNN Predicted Depths", "Convolutional Neural Networks", "Depth Map", "Depth Prediction", "Estimation Tasks", "Geometric SLAM", "Joint Self Supervised Monocular Depth Estimation", "Leverage SLAM", "Monocular SLAM Baseline", "Optional Sparse Auxiliary Network", "Original Depth Estimation Network", "Pre Trained Monocular Depth Estimation Network", "Sparse Depth Features", "Solid Modeling", "Simultaneous Localization And Mapping", "Three Dimensional Displays", "Pose Estimation", "Feature Extraction", "Cameras", "Pattern Recognition" ], "authors": [ { "affiliation": "Chinese Academy of Sciences,Institute of Automation,Beijing,China", "fullName": "Xiaoxia Xing", "givenName": "Xiaoxia", "surname": "Xing", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,Institute of Automation,Beijing,China", "fullName": "Yinghao Cai", "givenName": "Yinghao", "surname": "Cai", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,Institute of Automation,Beijing,China", "fullName": "Tao Lu", "givenName": "Tao", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,Institute of Automation,Beijing,China", "fullName": "Yiping Yang", "givenName": "Yiping", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,Institute of Automation,Beijing,China", "fullName": "Dayong Wen", "givenName": "Dayong", "surname": "Wen", "__typename": "ArticleAuthorType" } ], "idPrefix": 
"icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "4030-4036", "year": "2022", "issn": null, "isbn": "978-1-6654-9062-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09956579", "articleId": "1IHpMRBlt04", "__typename": "AdjacentArticleType" }, "next": { "fno": "09956493", "articleId": "1IHpH9oN7MI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icarsc/2016/2255/0/07781977", "title": "Indoor SLAM for Micro Aerial Vehicles Control Using Monocular Camera and Sensor Fusion", "doi": null, "abstractUrl": "/proceedings-article/icarsc/2016/07781977/12OmNCfjesr", "parentPublication": { "id": "proceedings/icarsc/2016/2255/0", "title": "2016 International Conference on Autonomous Robot Systems and Competitions (ICARSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034a912", "title": "3D Scene Mesh from CNN Depth Predictions and Sparse Monocular SLAM", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a912/12OmNvD8RuE", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2018/5114/0/511401a350", "title": "Monocular SLAM Algorithm Based on Improved Depth Map Estimation and Keyframe Selection", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2018/511401a350/12OmNyeECAZ", "parentPublication": { "id": "proceedings/icmtma/2018/5114/0", "title": "2018 10th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457g565", "title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g565/12OmNzTH0Qa", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08546049", "title": "Scalable Monocular SLAM by Fusing and Connecting Line Segments with Inverse Depth Filter", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08546049/17D45WHONrT", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545173", "title": "Em-SLAM: a Fast and Robust Monocular SLAM Method for Embedded Systems", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545173/17D45XdBRQs", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hdis/2022/9144/0/09991394", "title": "Pseudo Depth Maps for RGB-D SLAM", "doi": null, "abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s", "parentPublication": { "id": "proceedings/hdis/2022/9144/0", "title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600d096", "title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600d096/1L8qEHGGTlu", "parentPublication": { "id": 
"proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300a803", "title": "Estimation of Absolute Scale in Monocular SLAM Using Synthetic Data", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300a803/1i5mGYlfA2s", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a101", "title": "Improved ORB-SLAM Based 3D Dense Reconstruction for Monocular Endoscopic Image", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a101/1vg8aYQPZi8", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1zWE36wtuCY", "title": "2021 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1zWEhxN28YU", "doi": "10.1109/3DV53792.2021.00143", "title": "DSP-SLAM: Object Oriented SLAM with Deep Shape Priors", "normalizedTitle": "DSP-SLAM: Object Oriented SLAM with Deep Shape Priors", "abstract": "We propose DSP-SLAM, an object-oriented SLAM system that builds a rich and accurate joint map of dense 3D models for foreground objects, and sparse landmark points to represent the background. DSP-SLAM takes as input the 3D point cloud reconstructed by a feature-based SLAM system and equips it with the ability to enhance its sparse map with dense reconstructions of detected objects. Objects are detected via semantic instance segmentation, and their shape and pose are estimated using category-specific deep shape embeddings as priors, via a novel second order optimization. Our object-aware bundle adjustment builds a pose-graph to jointly optimize camera poses, object locations and feature points. DSP-SLAM can operate at 10 frames per second on 3 different input modalities: monocular, stereo, or stereo&#x002B;LiDAR. We demonstrate DSP-SLAM operating at almost frame rate on monocular-RGB sequences from the Friburg and Redwood-OS datasets, and on stereo&#x002B;LiDAR sequences on the KITTI odometry dataset showing that it achieves high-quality full object reconstructions, even from partial observations, while maintaining a consistent global map. Our evaluation shows improvements in object pose and shape reconstruction with respect to recent deep prior-based reconstruction methods and reductions in camera tracking drift on the KITTI dataset. 
More details and demonstrations are available at our project page: https://jingwenwang95.github.io/dsp-slam/", "abstracts": [ { "abstractType": "Regular", "content": "We propose DSP-SLAM, an object-oriented SLAM system that builds a rich and accurate joint map of dense 3D models for foreground objects, and sparse landmark points to represent the background. DSP-SLAM takes as input the 3D point cloud reconstructed by a feature-based SLAM system and equips it with the ability to enhance its sparse map with dense reconstructions of detected objects. Objects are detected via semantic instance segmentation, and their shape and pose are estimated using category-specific deep shape embeddings as priors, via a novel second order optimization. Our object-aware bundle adjustment builds a pose-graph to jointly optimize camera poses, object locations and feature points. DSP-SLAM can operate at 10 frames per second on 3 different input modalities: monocular, stereo, or stereo&#x002B;LiDAR. We demonstrate DSP-SLAM operating at almost frame rate on monocular-RGB sequences from the Friburg and Redwood-OS datasets, and on stereo&#x002B;LiDAR sequences on the KITTI odometry dataset showing that it achieves high-quality full object reconstructions, even from partial observations, while maintaining a consistent global map. Our evaluation shows improvements in object pose and shape reconstruction with respect to recent deep prior-based reconstruction methods and reductions in camera tracking drift on the KITTI dataset. More details and demonstrations are available at our project page: https://jingwenwang95.github.io/dsp-slam/", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose DSP-SLAM, an object-oriented SLAM system that builds a rich and accurate joint map of dense 3D models for foreground objects, and sparse landmark points to represent the background. 
DSP-SLAM takes as input the 3D point cloud reconstructed by a feature-based SLAM system and equips it with the ability to enhance its sparse map with dense reconstructions of detected objects. Objects are detected via semantic instance segmentation, and their shape and pose are estimated using category-specific deep shape embeddings as priors, via a novel second order optimization. Our object-aware bundle adjustment builds a pose-graph to jointly optimize camera poses, object locations and feature points. DSP-SLAM can operate at 10 frames per second on 3 different input modalities: monocular, stereo, or stereo+LiDAR. We demonstrate DSP-SLAM operating at almost frame rate on monocular-RGB sequences from the Friburg and Redwood-OS datasets, and on stereo+LiDAR sequences on the KITTI odometry dataset showing that it achieves high-quality full object reconstructions, even from partial observations, while maintaining a consistent global map. Our evaluation shows improvements in object pose and shape reconstruction with respect to recent deep prior-based reconstruction methods and reductions in camera tracking drift on the KITTI dataset. 
More details and demonstrations are available at our project page: https://jingwenwang95.github.io/dsp-slam/", "fno": "268800b362", "keywords": [ "Cameras", "Distance Measurement", "Image Reconstruction", "Image Segmentation", "Image Sequences", "Mobile Robots", "Object Detection", "Optical Radar", "Pose Estimation", "SLAM Robots", "Stereo Image Processing", "Https Jingwenwang 95 Github Io Dsp Slam", "Deep Prior Based Reconstruction", "Partial Observation", "High Quality Full Object Reconstructions", "KITTI Odometry Dataset", "Stereo Li DAR Sequences", "Redwood OS Datasets", "Friburg Datasets", "Monocular RGB Sequences", "Camera Poses", "Estimated Pose", "Estimated Shape", "Semantic Instance Segmentation", "Detected Objects", "Dense Reconstructions", "3 D Point Cloud Reconstructed", "Different Input Modalities", "Prior Based Reconstruction Methods", "DSP SLAM Operating", "Object Locations", "Object Aware Bundle Adjustment", "Category Specific Deep Shape Embeddings", "Feature Based SLAM System", "Sparse Landmark Points", "Foreground Objects", "Dense 3 D Models", "Accurate Joint Map", "Rich Joint Map", "Object Oriented SLAM System", "Deep Shape Priors", "Point Cloud Compression", "Solid Modeling", "Simultaneous Localization And Mapping", "Shape", "Semantics", "Reconstruction Algorithms", "Cameras" ], "authors": [ { "affiliation": "University College London,Department of Computer Science", "fullName": "Jingwen Wang", "givenName": "Jingwen", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London,Department of Computer Science", "fullName": "Martin Rünz", "givenName": "Martin", "surname": "Rünz", "__typename": "ArticleAuthorType" }, { "affiliation": "University College London,Department of Computer Science", "fullName": "Lourdes Agapito", "givenName": "Lourdes", "surname": "Agapito", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": 
true, "pubDate": "2021-12-01T00:00:00", "pubType": "proceedings", "pages": "1362-1371", "year": "2021", "issn": null, "isbn": "978-1-6654-2688-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "268800b351", "articleId": "1zWEfYNkrg4", "__typename": "AdjacentArticleType" }, "next": { "fno": "268800b372", "articleId": "1zWEcKoOcQ8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2016/2491/0/2491a456", "title": "Texture-Aware SLAM Using Stereo Imagery and Inertial Information", "doi": null, "abstractUrl": "/proceedings-article/crv/2016/2491a456/12OmNx1IwdI", "parentPublication": { "id": "proceedings/crv/2016/2491/0", "title": "2016 13th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601b001", "title": "Detect-SLAM: Making Object Detection and SLAM Mutually Beneficial", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601b001/12OmNxd4tBP", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceic/2021/0212/0/021200a081", "title": "IDMC-VSLAM: Improved dense map construction and visual SLAM in dynamic environments", "doi": null, "abstractUrl": "/proceedings-article/icceic/2021/021200a081/1AFsC5XRpMk", "parentPublication": { "id": "proceedings/icceic/2021/0212/0", "title": "2021 2nd International Conference on Computer Engineering and Intelligent Control (ICCEIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccpqt/2022/7020/0/702000a053", "title": "Research on Vision-based Semantic SLAM towards Indoor Dynamic Environment", "doi": null, 
"abstractUrl": "/proceedings-article/ccpqt/2022/702000a053/1Iiu5DpyPOU", "parentPublication": { "id": "proceedings/ccpqt/2022/7020/0", "title": "2022 International Conference on Computing, Communication, Perception and Quantum Technology (CCPQT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hdis/2022/9144/0/09991394", "title": "Pseudo Depth Maps for RGB-D SLAM", "doi": null, "abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s", "parentPublication": { "id": "proceedings/hdis/2022/9144/0", "title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icica/2022/9011/0/901100a073", "title": "MN-SLAM: Multi-networks Visual SLAM for Dynamic and Complicated Environments", "doi": null, "abstractUrl": "/proceedings-article/icica/2022/901100a073/1LKxbryd1CM", "parentPublication": { "id": "proceedings/icica/2022/9011/0", "title": "2022 11th International Conference on Information Communication and Applications (ICICA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgiv/2022/9250/0/925000a001", "title": "A novel Loop Closure Approach in 2D LIDAR SLAM", "doi": null, "abstractUrl": "/proceedings-article/iccgiv/2022/925000a001/1Lxfozxuous", "parentPublication": { "id": "proceedings/iccgiv/2022/9250/0", "title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a574", "title": "Mobile Photometric Stereo with Keypoint-Based SLAM for Dense 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a574/1ezREwjZfFe", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision 
(3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2020/9231/0/923100a242", "title": "VEM-SLAM - Virtual Environment Modelling through SLAM", "doi": null, "abstractUrl": "/proceedings-article/svr/2020/923100a242/1oZBEeT7CqA", "parentPublication": { "id": "proceedings/svr/2020/9231/0", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412341", "title": "Learning to Segment Dynamic Objects using SLAM Outliers", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412341/1tmjm0W44yQ", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }